iOS: EXC_ARM_DA_ALIGN error in release build

I have a function in my application that stores data from a buffer. It works fine in debug mode, both on device and in the simulator, but when I create an .ipa and run it on a device, I get an EXC_ARM_DA_ALIGN error in libstdc++.6.dylib at std::string::_M_replace_safe(unsigned long, unsigned long, char const*, unsigned long):
struct stMemoryBlock
{
    stMemoryBlock(void* InData, int InSize)
    {
        data = InData;
        size = InSize;
        offset = 0;
    }
    void* data;
    unsigned int size;
    unsigned int offset;
};
//-----------------------------------------------
char* cDataCollector::TestMemoryThink(char* Buffer, int BufferSize, int TestOffset, int TestSize)
{
    char* result = NULL;
    if (TestOffset + TestSize <= BufferSize)
    {
        result = &Buffer[TestOffset];
    }
    return result;
}
//-----------------------------------------------------
bool cDataCollector::StoreBinaryData(void* DataBuffer, int DataSize)
{
    bool result = false;
    char* InBuffer = (char *)DataBuffer;
    if (!mPreparedData && !mPreparedDataSize && !mMemoryMap.size())
    {
        unsigned int CountElements = 0;
        int offset = sizeof(unsigned int);
        if (DataSize >= sizeof(unsigned int))
        {
            // CountElements = *(unsigned int*)(&InBuffer[0]);
            memcpy(&CountElements, InBuffer, sizeof(CountElements));
        }
        result = true;
        for (unsigned int i = 0; (i < CountElements) && result; ++i)
        {
            std::string ThinkName;
            stMemoryBlock * MemoryBlock = NULL;
            result = result && TestMemoryThink(InBuffer, DataSize, offset, 0) != NULL;
            if (result)
            {
                size_t name_think_size = strlen(&InBuffer[offset]);
                char* think_name = TestMemoryThink(InBuffer, DataSize, offset, 0);
                result = result && (think_name != NULL);
                if (result)
                {
                    ThinkName = think_name;
                    offset += (name_think_size + 1);
                }
            }
This line causes the error:
ThinkName = think_name;
Maybe I need another way to read a string from a memory location that isn't word (32-bit) aligned? Please help!
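One common workaround (a sketch, not from the original post) is to copy the bytes through a plain char buffer with memcpy, which is defined for unaligned source addresses, and only then build the std::string from the aligned copy. ReadUnalignedString below is a hypothetical helper; name_think_size is the strlen result already computed above.

#include <cstring>
#include <string>
#include <vector>

// Hypothetical helper, sketched as a possible fix: memcpy copies byte by
// byte and makes no word-alignment assumptions, unlike the optimized copy
// inside the std::string assignment that crashed here.
static std::string ReadUnalignedString(const char* src, size_t len)
{
    std::vector<char> tmp(len + 1);
    std::memcpy(tmp.data(), src, len); // safe for an unaligned src
    tmp[len] = '\0';
    return std::string(tmp.data(), len);
}

// Usage in the loop above (sketch):
//   ThinkName = ReadUnalignedString(think_name, name_think_size);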

How to do real-time pitch shifting from mic with Superpowered?

I'm trying to do real-time pitch shifting from the microphone using Superpowered. I looked at the SDK example, which processes a file, and tried to adapt it. I managed to change the sound, but the result is badly distorted, with interference. What am I doing wrong? And where can I find more information on Superpowered and time stretching?
static bool audioProcessing(void *clientdata,
                            float **buffers,
                            unsigned int inputChannels,
                            unsigned int outputChannels,
                            unsigned int numberOfSamples,
                            unsigned int samplerate,
                            uint64_t hostTime) {
    __unsafe_unretained Superpowered *self = (__bridge Superpowered *)clientdata;
    float tempBuffer[numberOfSamples * 2 + 16];
    SuperpoweredInterleave(buffers[0], buffers[1], tempBuffer, numberOfSamples);
    float *outputBuffer = tempBuffer;
    SuperpoweredAudiobufferlistElement inputBuffer;
    inputBuffer.samplePosition = 0;
    inputBuffer.startSample = 0;
    inputBuffer.samplesUsed = 0;
    inputBuffer.endSample = self->timeStretcher->numberOfInputSamplesNeeded;
    inputBuffer.buffers[0] = SuperpoweredAudiobufferPool::getBuffer(self->timeStretcher->numberOfInputSamplesNeeded * 8 + 64);
    inputBuffer.buffers[1] = inputBuffer.buffers[2] = inputBuffer.buffers[3] = NULL;
    memcpy((float *)inputBuffer.buffers[0], outputBuffer, numberOfSamples * 2 + 16);
    self->timeStretcher->process(&inputBuffer, self->outputBuffers);
    // Do we have some output?
    if (self->outputBuffers->makeSlice(0, self->outputBuffers->sampleLength)) {
        while (true) { // Iterate on every output slice.
            // Get pointer to the output samples.
            int sampleCount = 0;
            float *timeStretchedAudio = (float *)self->outputBuffers->nextSliceItem(&sampleCount);
            if (!timeStretchedAudio) break;
            SuperpoweredDeInterleave(timeStretchedAudio, buffers[0], buffers[1], numberOfSamples);
        }
        // Clear the output buffer list.
        self->outputBuffers->clear();
    }
    return true;
}
I did the following:
static bool audioProcessing(void *clientdata,
                            float **buffers,
                            unsigned int inputChannels,
                            unsigned int outputChannels,
                            unsigned int numberOfSamples,
                            unsigned int samplerate,
                            uint64_t hostTime) {
    __unsafe_unretained Superpowered *self = (__bridge Superpowered *)clientdata;
    SuperpoweredAudiobufferlistElement inputBuffer;
    inputBuffer.startSample = 0;
    inputBuffer.samplesUsed = 0;
    inputBuffer.endSample = numberOfSamples;
    inputBuffer.buffers[0] = SuperpoweredAudiobufferPool::getBuffer((unsigned int)(numberOfSamples * 8 + 64));
    inputBuffer.buffers[1] = inputBuffer.buffers[2] = inputBuffer.buffers[3] = NULL;
    SuperpoweredInterleave(buffers[0], buffers[1], (float *)inputBuffer.buffers[0], numberOfSamples);
    self->timeStretcher->process(&inputBuffer, self->outputBuffers);
    // Do we have some output?
    if (self->outputBuffers->makeSlice(0, self->outputBuffers->sampleLength)) {
        while (true) { // Iterate on every output slice.
            // Get pointer to the output samples.
            int numSamples = 0;
            float *timeStretchedAudio = (float *)self->outputBuffers->nextSliceItem(&numSamples);
            if (!timeStretchedAudio || *timeStretchedAudio == 0) {
                break;
            }
            SuperpoweredDeInterleave(timeStretchedAudio, buffers[0], buffers[1], numSamples);
        }
        // Clear the output buffer list.
        self->outputBuffers->clear();
    }
    return true;
}
This might also misbehave when the speed changes, but I only want live pitch shifting; people can simply speak slower or faster themselves.
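For what it's worth, here is a possible direction rather than a verified fix: keep the time stretcher at rate 1.0 so it neither adds nor drops samples, and apply only a pitch shift. The sketch assumes the v1 SDK's SuperpoweredTimeStretching::setRateAndPitchShift(float, int); check SuperpoweredTimeStretching.h in your SDK version, since the exact name may differ. Note also that the *timeStretchedAudio == 0 test added in the adapted loop breaks out early whenever the first sample of a slice happens to be exactly zero, which will audibly drop output.

// Sketch under the assumptions above: configure pitch-only shifting once,
// e.g. whenever the desired shift changes. semitones is a hypothetical
// parameter in the range -12..+12.
static void configurePitchOnly(SuperpoweredTimeStretching *timeStretcher, int semitones) {
    // Rate 1.0f: the output sample count tracks the input, so live
    // monitoring does not drift; only the pitch is transposed.
    timeStretcher->setRateAndPitchShift(1.0f, semitones);
}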

string.match works but string.find fails in cocos lua on Android when using Chinese

I use string.find('中国', '中') and string.match('中国', '中') to search in cocos lua.
Strangely, both work on Win32, but string.find('中国', '中') fails on Android.
I read the Lua source code: http://www.lua.org/source/5.1/lstrlib.c.html#str_find_aux
It seems that lmemfind and match work differently on different platforms.
static int str_find (lua_State *L) {
  return str_find_aux(L, 1);
}

static int str_match (lua_State *L) {
  return str_find_aux(L, 0);
}

static int str_find_aux (lua_State *L, int find) {
  size_t l1, l2;
  const char *s = luaL_checklstring(L, 1, &l1);
  const char *p = luaL_checklstring(L, 2, &l2);
  ptrdiff_t init = posrelat(luaL_optinteger(L, 3, 1), l1) - 1;
  if (init < 0) init = 0;
  else if ((size_t)(init) > l1) init = (ptrdiff_t)l1;
  if (find && (lua_toboolean(L, 4) ||  /* explicit request? */
      strpbrk(p, SPECIALS) == NULL)) {  /* or no special characters? */
    /* do a plain search */
    const char *s2 = lmemfind(s+init, l1-init, p, l2);
    if (s2) {
      lua_pushinteger(L, s2-s+1);
      lua_pushinteger(L, s2-s+l2);
      return 2;
    }
  }
  else {
    MatchState ms;
    int anchor = (*p == '^') ? (p++, 1) : 0;
    const char *s1 = s+init;
    ms.L = L;
    ms.src_init = s;
    ms.src_end = s+l1;
    do {
      const char *res;
      ms.level = 0;
      if ((res=match(&ms, s1, p)) != NULL) {
        if (find) {
          lua_pushinteger(L, s1-s+1);  /* start */
          lua_pushinteger(L, res-s);   /* end */
          return push_captures(&ms, NULL, 0) + 2;
        }
        else
          return push_captures(&ms, s1, res);
      }
    } while (s1++ < ms.src_end && !anchor);
  }
  lua_pushnil(L);  /* not found */
  return 1;
}

Objective-C division on 32/64-bit device produces different results

As described in the title, when I try to do the following division I get two different results depending on the architecture of the device:
unsigned int a = 42033;
unsigned int b = 360;
unsigned int c = 466;
double result = a / (double)(b * c);
// on arm64 -> result = 0.25055436337625181
// on armv7 -> result = 0.24986030696800732
Why don't the results match?
According to Apple's 64-Bit Transition Guide for Cocoa Touch, these data types have the same size in the 32-bit and 64-bit runtimes.
EDIT
The complete code:
#import "UIImage+MyCategory.h"
#define CLIP_THRESHOLD 0.74 // if this much of the image is the clip color, leave it alone
typedef struct {
unsigned int leftNonColorIndex;
unsigned int rightNonColorIndex;
unsigned int nonColorCount;
} scanLineResult;
static inline scanLineResult scanOneLine(unsigned int *scanline, unsigned int count, unsigned int color, unsigned int mask) {
scanLineResult result = {UINT32_MAX, 0, 0};
for (int i = 0; i < count; i++) {
if ((*scanline++ & mask) != color) {
result.nonColorCount++;
result.leftNonColorIndex = MIN(result.leftNonColorIndex, i);
result.rightNonColorIndex = MAX(result.rightNonColorIndex, i);
}
}
return result;
}
typedef struct {
unsigned int leftNonColorIndex;
unsigned int topNonColorIndex;
unsigned int rightNonColorIndex;
unsigned int bottomNonColorIndex;
unsigned int nonColorCount;
double colorRatio;
} colorBoundaries;
static colorBoundaries findTrimColorBoundaries(unsigned int *buffer,
unsigned int width,
unsigned int height,
unsigned int bytesPerRow,
unsigned int color,
unsigned int mask)
{
colorBoundaries result = {UINT32_MAX, UINT32_MAX, 0, 0, 0.0};
unsigned int *currentLine = buffer;
for (int i = 0; i < height; i++) {
scanLineResult lineResult = scanOneLine(currentLine, width, color, mask);
if (lineResult.nonColorCount) {
result.nonColorCount += lineResult.nonColorCount;
result.topNonColorIndex = MIN(result.topNonColorIndex, i);
result.bottomNonColorIndex = MAX(result.bottomNonColorIndex, i);
result.leftNonColorIndex = MIN(result.leftNonColorIndex, lineResult.leftNonColorIndex);
result.rightNonColorIndex = MAX(result.rightNonColorIndex, lineResult.rightNonColorIndex);
}
currentLine = (unsigned int *)((char *)currentLine + bytesPerRow);
}
double delta = result.nonColorCount / (double)(width * height);
result.colorRatio = 1.0 - delta;
return result;
}
#implementation UIImage (MyCategory)
- (UIImage *)crop:(CGRect)rect {
rect = CGRectMake(rect.origin.x * self.scale,
rect.origin.y * self.scale,
rect.size.width * self.scale,
rect.size.height * self.scale);
CGImageRef imageRef = CGImageCreateWithImageInRect([self CGImage], rect);
UIImage *result = [UIImage imageWithCGImage:imageRef
scale:self.scale
orientation:self.imageOrientation];
CGImageRelease(imageRef);
return result;
}
- (UIImage*)trimWhiteBorders {
#ifdef __BIG_ENDIAN__
// undefined
#else
const unsigned int whiteXRGB = 0x00ffffff;
// Which bits to actually check
const unsigned int maskXRGB = 0x00ffffff;
#endif
CGImageRef image = [self CGImage];
CGBitmapInfo bitmapInfo = CGImageGetBitmapInfo(image);
// Only support default image formats
if (bitmapInfo != (kCGImageAlphaNoneSkipFirst | kCGBitmapByteOrder32Host))
return nil;
CGDataProviderRef dataProvider = CGImageGetDataProvider(image);
CFDataRef imageData = CGDataProviderCopyData(dataProvider);
colorBoundaries result = findTrimColorBoundaries((unsigned int *)CFDataGetBytePtr(imageData),
(unsigned int)CGImageGetWidth(image),
(unsigned int)CGImageGetHeight(image),
(unsigned int)CGImageGetBytesPerRow(image),
whiteXRGB,
maskXRGB);
CFRelease(imageData);
if (result.nonColorCount == 0 || result.colorRatio > CLIP_THRESHOLD)
return self;
CGRect trimRect = CGRectMake(result.leftNonColorIndex,
result.topNonColorIndex,
result.rightNonColorIndex - result.leftNonColorIndex + 1,
result.bottomNonColorIndex - result.topNonColorIndex + 1);
return [self crop:trimRect];
}
#end
Tested the code below in Xcode 6.3.1 using iOS 8.3 and LLVM 6.1 on an iPad 3 (armv7) and an iPhone 6 (arm64), and they produced the same values to at least 15 digits of precision.
unsigned int a = 42033;
unsigned int b = 360;
unsigned int c = 466;
double result = a / (double)(b * c);
// on arm64 -> result = 0.25055436337625181
// on armv7 -> result = 0.24986030696800732
NSString *msg = [NSString stringWithFormat:@"result: %.15f", result];
[[[UIAlertView alloc] initWithTitle:@"" message:msg delegate:nil cancelButtonTitle:@"#jolo" otherButtonTitles:nil] show];
That being said, Xcode 6.3 includes LLVM 6.1, and that includes changes for arm64 and floating-point math. See the Apple LLVM Compiler Version 6.1 section in the release notes:
https://developer.apple.com/library/ios/releasenotes/DeveloperTools/RN-Xcode/Chapters/xc6_release_notes.html
Java code:
public class Division {
    public static void main(String[] args) {
        int a = 42033;
        int b = 360;
        int c = 466;
        double result = a / (double)(b * c);
        System.out.println("Result = " + result);
        double result2 = (a - 1) / (double)(b * c);
        double result3 = (a) / (double)((b + 1) * c);
        double result4 = (a) / (double)(b * (c + 1));
        System.out.println("Result2 = " + result2);
        System.out.println("Result3 = " + result3);
        System.out.println("Result4 = " + result4);
    }
}
Results:
C:\JavaTools>java Division
Result = 0.2505543633762518
Result2 = 0.250548402479733
Result3 = 0.24986030696800732
Result4 = 0.25001784439685937
As can be seen, the "wrong" result is explained by b having a value other than what the OP stated. It has nothing to do with the precision of the arithmetic.
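Spelling the Result3 case out: with b + 1 = 361 the divisor becomes 361 * 466 = 168226, and 42033 / 168226 = 0.24986030696800732, exactly the armv7 value reported, whereas 360 * 466 = 167760 gives the arm64 value. So the armv7 run was evidently dividing by a different b, not rounding differently.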

BIGNUM strange behavior in a calculation loop

I'm trying to implement a basic routine that performs some calculations on BIGNUMs, and I've found some strange behavior. The functions are as follows:
unsigned char *char_array_as_hex(unsigned char *chr_a, int len)
{
    unsigned char *chr_s = (unsigned char *)malloc(len * 2);
    char buffer[5];
    for (int i = 0; i < len; i++)
    {
        sprintf(buffer, "%02X", chr_a[i]);
        chr_s[(2 * i) + 0] = buffer[0];
        chr_s[(2 * i) + 1] = buffer[1];
    }
    return chr_s;
}
and
char *big_number_as_decimal_from_hex_array(unsigned char *chr_a, int len, BN_CTX *bn_ctx)
{
    unsigned char *hex_s = char_array_as_hex(chr_a, len);
    BIGNUM *big_number = BN_CTX_get(bn_ctx);
    BN_hex2bn(&big_number, (char *)hex_s);
    char *big_number_as_decimal = BN_bn2dec(big_number);
    free(hex_s);
    BN_free(big_number);
    return big_number_as_decimal;
}
and
void test_compute_prime256v1()
{
    BN_CTX *bn_ctx = BN_CTX_new();
    BN_CTX_start(bn_ctx);
    unsigned char seed_a[20] = {
        0xC4,0x9D,0x36,0x08,0x86,0xE7,0x04,0x93,0x6A,0x66, /* seed */
        0x78,0xE1,0x13,0x9D,0x26,0xB7,0x81,0x9F,0x7E,0x90
    };
    printf("s = %s\n", big_number_as_decimal_from_hex_array(seed_a, 20, bn_ctx));
    unsigned char p_a[32] = {
        0xFF,0xFF,0xFF,0xFF,0x00,0x00,0x00,0x01,0x00,0x00, /* p */
        0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
        0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
        0xFF,0xFF
    };
    printf("p = %s\n", big_number_as_decimal_from_hex_array(p_a, 32, bn_ctx));
    BN_CTX_end(bn_ctx);
    BN_CTX_free(bn_ctx);
}
Then I call test_compute_prime256v1 from an Objective-C method. If I call it once, or several times with a reasonable delay between calls, it produces the correct result; however, when I call it in a loop it produces different, incorrect values:
- (IBAction)btnOK_Clicked:(id)sender
{
    for (int i = 1; i < 10; i++)
    {
        printf("i = %d\n", i);
        test_compute_prime256v1();
    }
}
and a sample output was
i = 1
s = 1122468115042657169822351801880191947498376363664
p = 115792089210356248762697446949407573530086143415290314195533631308867097853951
i = 2
s = 1122468115042657169822351801880191947498376363664
p = 966134380529368896499052403318808180610643774633026536153469502543482958881555881553276...
i = 3
s = 1122468115042657169822351801880191947498376363664
p = 115792089210356248762697446949407573530086143415290314195533631308867097853951
Note: some numbers are trimmed to fit. I have followed the suggestion given here.
Am I missing something? Is there a mistake somewhere?
Can anyone help?
Thanks
EDITED:
I made some modifications to the code but the issue still exists. I changed big_number_as_decimal_from_hex_array as follows:

char *big_number_as_decimal_from_hex_array_ex(unsigned char *chr_a, int len)
{
    BN_CTX *bn_ctx = BN_CTX_new();
    BN_CTX_start(bn_ctx);
    unsigned char *hex_s = char_array_as_hex(chr_a, len);
    BIGNUM *big_number = BN_CTX_get(bn_ctx);
    BN_hex2bn(&big_number, (char *)hex_s);
    char *big_number_as_decimal = BN_bn2dec(big_number);
    free(hex_s);
    BN_free(big_number);
    BN_CTX_end(bn_ctx);
    BN_CTX_free(bn_ctx);
    return big_number_as_decimal;
}
and also
char *big_number_as_decimal_from_hex_array_ex_2(unsigned char *chr_a, int len)
{
    BN_CTX *bn_ctx = BN_CTX_new();
    unsigned char *hex_s = char_array_as_hex(chr_a, len);
    BIGNUM *big_number = BN_CTX_get(bn_ctx);
    BN_hex2bn(&big_number, (char *)hex_s);
    char *big_number_as_decimal = BN_bn2dec(big_number);
    free(hex_s);
    BN_free(big_number);
    BN_CTX_free(bn_ctx);
    return big_number_as_decimal;
}
I modified test_compute_prime256v1 as follows:

void test_compute_prime256v1_ex()
{
    unsigned char seed_a[20] = {...};
    printf("s = %s\n", big_number_as_decimal_from_hex_array_ex(seed_a, 20));
    unsigned char p_a[32] = {...};
    printf("p = %s\n", big_number_as_decimal_from_hex_array_ex(p_a, 32));
    // or
    unsigned char seed_a[20] = {...};
    printf("s = %s\n", big_number_as_decimal_from_hex_array_ex_2(seed_a, 20));
    unsigned char p_a[32] = {...};
    printf("p = %s\n", big_number_as_decimal_from_hex_array_ex_2(p_a, 32));
}
but the code produces the same incorrect result in a looped calculation.

BN_hex2bn(&big_number, (char *)hex_s); expects a C string as its second argument, i.e. a '\0'-terminated one, since it has no other way to know the length of your string.
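A minimal sketch of the fix that answer implies: allocate one extra byte and NUL-terminate the hex string. (Separately, a BIGNUM obtained from BN_CTX_get must not be released with BN_free; BN_CTX_end returns it to the context, so the BN_free calls above are another likely source of the corruption.)

#include <cstdio>
#include <cstdlib>

// Sketch: the same conversion as char_array_as_hex above, but with room
// for the terminating '\0' that BN_hex2bn needs to find the end of the input.
unsigned char *char_array_as_hex_terminated(unsigned char *chr_a, int len)
{
    unsigned char *chr_s = (unsigned char *)malloc(len * 2 + 1);
    for (int i = 0; i < len; i++)
        sprintf((char *)&chr_s[2 * i], "%02X", chr_a[i]); // writes two digits plus '\0'
    chr_s[len * 2] = '\0'; // explicit terminator, also covers len == 0
    return chr_s;
}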

Bit field ordering on Big-Endian (SPARC) processor

Consider the code below:
#include <stdio.h>
#include <stdlib.h>

#define FORCE_CAST(var, type) *(type*)&var

struct processor_status_register
{
    unsigned int cwp:5;
    unsigned int et:1;
    unsigned int ps:1;
    unsigned int s:1;
    unsigned int pil:4;
    unsigned int ef:1;
    unsigned int ec:1;
    unsigned int reserved:6;
    unsigned int c:1;
    unsigned int v:1;
    unsigned int z:1;
    unsigned int n:1;
    unsigned int ver:4;
    unsigned int impl:4;
}__attribute__ ((__packed__));

struct registers
{
    unsigned long* registerSet;
    unsigned long* globalRegisters;
    unsigned long* cwptr;
    unsigned long wim, tbr, y, pc, npc;
    unsigned short registerWindows;
    /* Though Intel x86 architecture allows un-aligned memory access, SPARC
       mandates memory accesses to be 8 byte aligned. Without
       __attribute__ ((aligned (8))) or a preceding dummy byte e.g.
       unsigned short dummyByte, the code below crashes with a dreaded Bus
       error and Core dump. For more details, follow the links below:
       http://blog.jgc.org/2007/04/debugging-solaris-bus-error-caused-by.html
       https://groups.google.com/forum/?fromgroups=#!topic/comp.unix.solaris/8SgFiMudGL4
    */
    struct processor_status_register __attribute__ ((aligned (8))) psr;
}__attribute__ ((__packed__));

int getBit(unsigned long bitStream, int position)
{
    int bit;
    bit = (bitStream & (1 << position)) >> position;
    return bit;
}

char* showBits(unsigned long bitStream, int startPosition, int endPosition)
{
    // Allocate one extra byte for NULL character
    char* bits = (char*)malloc(endPosition - startPosition + 2);
    int bitIndex;
    for (bitIndex = 0; bitIndex <= endPosition; bitIndex++)
        bits[bitIndex] = (getBit(bitStream, endPosition - bitIndex)) ? '1' : '0';
    bits[bitIndex] = '\0';
    return bits;
}
int main()
{
    struct registers sparcRegisters;
    short isLittleEndian;
    // Check for Endianness
    unsigned long checkEndian = 0x00000001;
    if (*((char*)(&checkEndian)))
    {
        printf("Little Endian\n");
        isLittleEndian = 1; // Little Endian architecture detected
    }
    else
    {
        printf("Big Endian\n");
        isLittleEndian = 0; // Big Endian architecture detected
    }
    unsigned long registerValue = 0xF30010A7;
    unsigned long swappedRegisterValue = isLittleEndian ? registerValue : __builtin_bswap32(registerValue);
    sparcRegisters.psr = FORCE_CAST(swappedRegisterValue, struct processor_status_register);
    registerValue = isLittleEndian ? FORCE_CAST(sparcRegisters.psr, unsigned long)
                                   : __builtin_bswap32(FORCE_CAST(sparcRegisters.psr, unsigned long));
    printf("\nPSR=0x%0X, IMPL=%u, VER=%u, CWP=%u\n", registerValue,
           sparcRegisters.psr.impl, sparcRegisters.psr.ver, sparcRegisters.psr.cwp);
    printf("PSR=%s\n", showBits(registerValue, 0, 31));
    sparcRegisters.psr.cwp = 7;
    sparcRegisters.psr.et = 1;
    sparcRegisters.psr.ps = 0;
    sparcRegisters.psr.s = 1;
    sparcRegisters.psr.pil = 0;
    sparcRegisters.psr.ef = 0;
    sparcRegisters.psr.ec = 0;
    sparcRegisters.psr.reserved = 0;
    sparcRegisters.psr.c = 0;
    sparcRegisters.psr.v = 0;
    sparcRegisters.psr.z = 0;
    sparcRegisters.psr.n = 0;
    sparcRegisters.psr.ver = 3;
    sparcRegisters.psr.impl = 0xF;
    registerValue = isLittleEndian ? FORCE_CAST(sparcRegisters.psr, unsigned long)
                                   : __builtin_bswap32(FORCE_CAST(sparcRegisters.psr, unsigned long));
    printf("\nPSR=0x%0X, IMPL=%u, VER=%u, CWP=%u\n", registerValue,
           sparcRegisters.psr.impl, sparcRegisters.psr.ver, sparcRegisters.psr.cwp);
    printf("PSR=%s\n\n", showBits(registerValue, 0, 31));
    return 0;
}
I used gcc-4.7.2 on Solaris 10 on SPARC to compile the code above, producing the Big-Endian output:
Big Endian
PSR=0xF30010A7, IMPL=3, VER=15, CWP=20
PSR=11110011000000000001000010100111
PSR=0x3F00003D, IMPL=15, VER=3, CWP=7
PSR=00111111000000000000000000111101
I used gcc-4.4 on Ubuntu 10.04 on Intel x86 to compile the same code, producing the Little-Endian output:
Little Endian
PSR=0xF30010A7, IMPL=15, VER=3, CWP=7
PSR=11110011000000000001000010100111
PSR=0xF30000A7, IMPL=15, VER=3, CWP=7
PSR=11110011000000000000000010100111
While the latter is as expected, can anyone please explain the Big-Endian counterpart? Assuming the showBits() method is correct, how can PSR=0x3F00003D give rise to IMPL=15, VER=3, CWP=7? How is the bit field arranged and interpreted in memory on a Big-Endian system?
... PSR=0x3F00003D give rise to IMPL=15, VER=3, CWP=7 values?
It can't. I don't know why you're calling __builtin_bswap32, but 0x3F00003D does not represent the memory of the sparcRegisters struct as you initialized it.
Let's check this code:
sparcRegisters.psr.cwp = 7;
sparcRegisters.psr.et = 1;
sparcRegisters.psr.ps = 0;
sparcRegisters.psr.s = 1;
sparcRegisters.psr.pil = 0;
sparcRegisters.psr.ef = 0;
sparcRegisters.psr.ec = 0;
sparcRegisters.psr.reserved = 0;
sparcRegisters.psr.c = 0;
sparcRegisters.psr.v = 0;
sparcRegisters.psr.z = 0;
sparcRegisters.psr.n = 0;
sparcRegisters.psr.ver = 3;
sparcRegisters.psr.impl = 0xF;
The individual translations are as follows:
7 => 00111
1 => 1
0 => 0
1 => 1
0 => 0000
0 => 0
0 => 0
0 => 000000
0 => 0
0 => 0
0 => 0
0 => 0
3 => 0011
F => 1111
The structure therefore in memory becomes 00111101000000000000000000111111 which is 0x3D00003F in big-endian.
You can confirm with this code (tested using CC on Solaris):
#include <stdio.h>
#include <string.h>

struct processor_status_register
{
    unsigned int cwp:5;
    unsigned int et:1;
    unsigned int ps:1;
    unsigned int s:1;
    unsigned int pil:4;
    unsigned int ef:1;
    unsigned int ec:1;
    unsigned int reserved:6;
    unsigned int c:1;
    unsigned int v:1;
    unsigned int z:1;
    unsigned int n:1;
    unsigned int ver:4;
    unsigned int impl:4;
}__attribute__ ((__packed__));

int getBit(unsigned long bitStream, int position)
{
    int bit;
    bit = (bitStream & (1 << position)) >> position;
    return bit;
}

char* showBits(unsigned long bitStream, int startPosition, int endPosition)
{
    // Allocate one extra byte for NULL character
    static char bits[33];
    memset(bits, 0, 33);
    int bitIndex;
    for (bitIndex = 0; bitIndex <= endPosition; bitIndex++)
    {
        bits[bitIndex] = (getBit(bitStream, endPosition - bitIndex)) ? '1' : '0';
    }
    return bits;
}

int main()
{
    processor_status_register psr;
    psr.cwp = 7;
    psr.et = 1;
    psr.ps = 0;
    psr.s = 1;
    psr.pil = 0;
    psr.ef = 0;
    psr.ec = 0;
    psr.reserved = 0;
    psr.c = 0;
    psr.v = 0;
    psr.z = 0;
    psr.n = 0;
    psr.ver = 3;
    psr.impl = 0xF;
    unsigned long registerValue = 0;
    memcpy(&registerValue, &psr, sizeof(registerValue));
    printf("\nPSR=0x%0X, IMPL=%u, VER=%u, CWP=%u\n", registerValue,
           psr.impl, psr.ver, psr.cwp);
    printf("PSR=%s\n\n", showBits(registerValue, 0, 31));
    return 0;
}
The output of this is:
PSR=0x3D00003F, IMPL=15, VER=3, CWP=7
PSR=00111101000000000000000000111111
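The takeaway: big-endian ABIs such as SPARC's allocate bit fields starting from the most significant bit, while little-endian x86 starts from the least significant one, so the same declaration maps to opposite ends of the word. A portable alternative (a sketch based on the architecturally defined SPARC V8 PSR field positions, not on compiler bit-field layout) is to extract the fields with shifts and masks:

#include <stdint.h>

// Sketch: endianness-independent PSR field extraction via shifts and masks
// (impl: bits 31..28, ver: bits 27..24, cwp: bits 4..0 in the SPARC V8 PSR).
// No compiler-specific bit-field ordering is involved.
static inline unsigned psr_cwp(uint32_t psr)  { return psr & 0x1Fu; }
static inline unsigned psr_ver(uint32_t psr)  { return (psr >> 24) & 0xFu; }
static inline unsigned psr_impl(uint32_t psr) { return (psr >> 28) & 0xFu; }
// e.g. psr_impl(0xF30010A7) == 15, psr_ver(0xF30010A7) == 3, psr_cwp(0xF30010A7) == 7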
