Convert some bytes from an unsigned char to int - iOS

I want to convert some bytes to an int.
This is my code so far:
unsigned char *myFrame = (unsigned char *)[aNSDataFrame bytes];
I want to take 4 bytes from this unsigned char buffer:
myFrame[10], myFrame[11], myFrame[12] and myFrame[13], and convert them to an integer.

int val = *(const int*)&myFrame[10];
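Note that this relies on &myFrame[10] being suitably aligned for an int and on the bytes already being in the host's byte order, so it can trap or give byte-swapped values on some targets. A more portable sketch (my own example, assuming the frame stores the value most-significant byte first; it needs <string.h> and <stdint.h>):
int32_t val;
memcpy(&val, &myFrame[10], sizeof(val)); /* byte-wise copy: no unaligned read, host byte order */
uint32_t u = ((uint32_t)myFrame[10] << 24) |
             ((uint32_t)myFrame[11] << 16) |
             ((uint32_t)myFrame[12] << 8) |
              (uint32_t)myFrame[13];
int32_t valBigEndian = (int32_t)u; /* explicit big-endian interpretation of the four bytes */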

You can do:
int a;
a = myFrame[10];
a = a << 8;
a = a | myFrame[11];
a = a << 8;
a = a | myFrame[12];
a = a << 8;
a = a | myFrame[13];
This builds the integer from those four bytes, with myFrame[10] ending up as the most significant byte.

int bytesToInt(unsigned char* b, unsigned length)
{
int val = 0;
int j = 0;
for (int i = length-1; i >= 0; --i)
{
val += (b[i] & 0xFF) << (8*j);
++j;
}
return val;
}
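Applied to the question's buffer, a call would look like this (my example; it assumes myFrame holds at least 14 bytes and, like the loop above, treats the first byte as the most significant):
int val = bytesToInt(&myFrame[10], 4); /* combines myFrame[10..13] */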

Related

Windows DPDK L2fwd- Receiving packets out of sequence

I am validating DPDK receive functionality. For this I'm shooting a pcap externally and have added code in l2fwd to dump the received packets to a pcap. The dumped pcap has all the packets from the shooter, but some of them are not in sequence.
The shooter is already validated.
DPDK version in use: 21.11
Link to the pcap used: https://wiki.wireshark.org/uploads/__moin_import__/attachments/SampleCaptures/tcp-ecn-sample.pcap
The out-of-order packets are random. On the first run I saw no jumbled packets, but I was able to replicate the issue on the second run, with the 2nd, 3rd and 4th packets jumbled into the order 3, 4, 2.
Below is a snippet from the l2fwd example with our modifications marked as // TESTCODE.
/* Read packet from RX queues. 8< */
for (i = 0; i < qconf->n_rx_port; i++) {
portid = qconf->rx_port_list[i];
nb_rx = rte_eth_rx_burst(portid, 0,
pkts_burst, MAX_PKT_BURST);
port_statistics[portid].rx += nb_rx;
for (j = 0; j < nb_rx; j++) {
m = pkts_burst[j];
// TESTCODE_STARTS
uint8_t* pkt = rte_pktmbuf_mtod(m, uint8_t*);
dump_to_pcap(pkt, rte_pktmbuf_pkt_len(m));
// TESTCODE_ENDS
rte_prefetch0(rte_pktmbuf_mtod(m, void *));
l2fwd_simple_forward(m, portid);
}
}
/* >8 End of read packet from RX queues. */
Below is the code for dump_to_pcap:
static int
dump_to_pcap(uint8_t* pkt, int pkt_len)
{
static FILE* fp = NULL;
static int init_file = 0;
if (0 == init_file) {
printf("Creating pcap\n");
char pcap_filename[256] = { 0 };
char Two_pcap_filename[256] = { 0 };
currentDateTime(pcap_filename);
sprintf(Two_pcap_filename,".\\Rx_%d_%s.pcap", 0, pcap_filename);
printf("FileSName to Create: %s\n", Two_pcap_filename);
fp = fopen(Two_pcap_filename, "wb");
if (NULL == fp) {
printf("Unable to open file\n");
fp = NULL;
}
else {
printf("File create success..\n");
init_file = 1;
typedef struct pcap_file_header1 {
unsigned int magic; // a 32-bit "magic number"
unsigned short version_major; //a 16-bit major version number
unsigned short version_minor; //a 16-bit minor version number
unsigned int thiszone; //a 32-bit "time zone offset" field that's not actually used, so you can (and probably should) just make it 0
unsigned int sigfigs; //a 32-bit "time stamp accuracy" field that's not actually used, so you can (and probably should) just make it 0
unsigned int snaplen; //a 32-bit "snapshot length" field
unsigned int linktype; //a 32-bit "link layer type" field
}dumpFileHdr;
dumpFileHdr file_hdr;
file_hdr.magic = 2712847316; //0xa1b2c3d4;
file_hdr.version_major = 2;
file_hdr.version_minor = 4;
file_hdr.thiszone = 0;
file_hdr.sigfigs = 0;
file_hdr.snaplen = 65535;
file_hdr.linktype = 1;
fwrite((void*)(&file_hdr), sizeof(dumpFileHdr), 1, fp);
//printf("Pcap Header written\n");
}
}
typedef struct pcap_pkthdr1 {
unsigned int ts_sec; /* time stamp */
unsigned int ts_usec;
unsigned int caplen; /* length of portion present */
unsigned int len; /* length this packet (off wire) */
}dumpPktHdr;
dumpPktHdr pkt_hdr;
static int ts_sec = 1;
pkt_hdr.ts_sec = ts_sec++;
pkt_hdr.ts_usec = 0;
pkt_hdr.caplen = pkt_hdr.len = pkt_len;
if (NULL != fp) {
fwrite((void*)(&pkt_hdr), sizeof(dumpPktHdr), 1, fp);
fwrite((void*)(pkt), pkt_len, 1, fp);
fflush(fp);
}
return 0;
}

How to resolve type mismatch signed to unsigned in Objective C?

In the code snippet below, how do I convert a signed integer to an unsigned integer without an implicit conversion?
- (NSUInteger) getSysInfo: (uint) typeSpecifier
{
size_t size = sizeof(int);
int results;
int mib[2] = {CTL_HW, typeSpecifier};
sysctl(mib, 2, &results, &size, NULL, 0);
return (NSUInteger) results;
}
Try this
int value = 1234;
unsigned int unsigned_value = (unsigned int) value;

iOS: EXC_ARM_DA_ALIGN error in release build

I have a function in my application that stores data from a buffer. It works fine in debug mode on both the device and the simulator, but when I create an .ipa and run it on the device, I get an EXC_ARM_DA_ALIGN error in libstdc++.6.dylib std::string::_M_replace_safe(unsigned long, unsigned long, char const*, unsigned long)
struct stMemoryBlock
{
stMemoryBlock(void* InData, int InSize)
{
data = InData;
size = InSize;
offset = 0;
};
void* data;
unsigned int size;
unsigned int offset;
};
//-----------------------------------------------
char* cDataCollector::TestMemoryThink(char* Buffer, int BufferSize, int TestOffset, int TestSize)
{
char* result = NULL;
if (TestOffset + TestSize <= BufferSize)
{
result = &Buffer[TestOffset];
}
return result;
}
//-----------------------------------------------------
bool cDataCollector::StoreBinaryData(void* DataBuffer, int DataSize)
{
bool result = false;
char* InBuffer = (char *)DataBuffer;
if (!mPreparedData && !mPreparedDataSize && !mMemoryMap.size())
{
unsigned int CountElements = 0;
int offset = sizeof(unsigned int);
if (DataSize >= sizeof(unsigned int))
{
// CountElements = *(unsigned int*)(&InBuffer[0]);
memcpy(&CountElements, InBuffer, sizeof(CountElements));
}
result = true;
for (unsigned int i = 0; (i < CountElements) && result; ++i)
{
std::string ThinkName ;
stMemoryBlock * MemoryBlock = NULL;
result = result && TestMemoryThink(InBuffer, DataSize, offset, 0) != NULL;
if (result)
{
size_t name_think_size = strlen(&InBuffer[offset]);
char* think_name = TestMemoryThink(InBuffer, DataSize, offset, 0);
result = result && (think_name != NULL);
if (result)
{
ThinkName = think_name;
offset += (name_think_size + 1);
}
}
This line causes the error:
ThinkName = think_name;
Maybe I need another way to read a string from a memory location that isn't word (32-bit) aligned? Please help!
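One common workaround for this kind of alignment fault (a sketch of mine, not an answer from this thread) is to copy the bytes out of the unaligned buffer with memcpy, which has no alignment requirements, and build the std::string from the copy:
#include <string>
#include <vector>
#include <cstring>
// Hypothetical helper: read a NUL-terminated string that may start at an unaligned address.
static std::string readUnalignedString(const char* src, size_t maxLen)
{
    std::vector<char> tmp(maxLen + 1, '\0');
    std::memcpy(&tmp[0], src, maxLen);  // byte-wise copy, safe for any alignment
    return std::string(&tmp[0]);        // stops at the first '\0', like the strlen-based code above
}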

BIGNUM strange behavior in a calculation loop

I'm trying to implement a basic routine to perform some calculations on BIGNUMs and I've found some strange behavior. The functions are as follows:
unsigned char *char_array_as_hex(unsigned char *chr_a, int len)
{
unsigned char *chr_s = (unsigned char *)malloc(len * 2);
char buffer[5];
for (int i = 0; i < len; i++)
{
sprintf(buffer, "%02X", chr_a[i]);
chr_s[(2 * i) + 0] = buffer[0];
chr_s[(2 * i) + 1] = buffer[1];
}
return chr_s;
}
and
char *big_number_as_decimal_from_hex_array(unsigned char *chr_a, int len, BN_CTX *bn_ctx)
{
unsigned char *hex_s = char_array_as_hex(chr_a, len);
BIGNUM *big_number = BN_CTX_get(bn_ctx);
BN_hex2bn(&big_number, (char *)hex_s);
char *big_number_as_decimal = BN_bn2dec(big_number);
free(hex_s);
BN_free(big_number);
return big_number_as_decimal;
}
and
void test_compute_prime256v1()
{
BN_CTX *bn_ctx = BN_CTX_new();
BN_CTX_start(bn_ctx);
unsigned char seed_a[20] = {
0xC4,0x9D,0x36,0x08,0x86,0xE7,0x04,0x93,0x6A,0x66, /* seed */
0x78,0xE1,0x13,0x9D,0x26,0xB7,0x81,0x9F,0x7E,0x90
};
printf("s = %s\n", big_number_as_decimal_from_hex_array(seed_a, 20, bn_ctx));
unsigned char p_a[32] = {
0xFF,0xFF,0xFF,0xFF,0x00,0x00,0x00,0x01,0x00,0x00, /* p */
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
0xFF,0xFF
};
printf("p = %s\n", big_number_as_decimal_from_hex_array(p_a, 32, bn_ctx));
BN_CTX_end(bn_ctx);
BN_CTX_free(bn_ctx);
}
then I call "test_compute_prime256v1" in an Objective-C method. If I call it once or multiple times with a reasonable delay between each call it produces correct result however, when I call that function in a loop it produces different incorrect values
- (IBAction)btnOK_Clicked:(id)sender
{
for (int i = 1; i < 10; i++)
{
printf("i = %d\n", i);
test_compute_prime256v1();
}
}
and a sample output was
i = 1
s = 1122468115042657169822351801880191947498376363664
p = 115792089210356248762697446949407573530086143415290314195533631308867097853951
i = 2
s = 1122468115042657169822351801880191947498376363664
p = 966134380529368896499052403318808180610643774633026536153469502543482958881555881553276...
i = 3
s = 1122468115042657169822351801880191947498376363664
p = 115792089210356248762697446949407573530086143415290314195533631308867097853951
Note: some numbers are trimmed to fit. I have followed the suggestion here.
Am I missing something? Is there any mistake somewhere?
Can anyone help?
Thanks
EDITED:
I made some modifications to the code but the issue still exists. I changed big_number_as_decimal_from_hex_array as follows:
char *big_number_as_decimal_from_hex_array_ex(unsigned char *chr_a, int len)
{
BN_CTX *bn_ctx = BN_CTX_new();
BN_CTX_start(bn_ctx);
unsigned char *hex_s = char_array_as_hex(chr_a, len);
BIGNUM *big_number = BN_CTX_get(bn_ctx);
BN_hex2bn(&big_number, (char *)hex_s);
char *big_number_as_decimal = BN_bn2dec(big_number);
free(hex_s);
BN_free(big_number);
BN_CTX_end(bn_ctx);
BN_CTX_free(bn_ctx);
return big_number_as_decimal;
}
and also
char *big_number_as_decimal_from_hex_array_ex_2(unsigned char *chr_a, int len)
{
BN_CTX *bn_ctx = BN_CTX_new();
unsigned char *hex_s = char_array_as_hex(chr_a, len);
BIGNUM *big_number = BN_CTX_get(bn_ctx);
BN_hex2bn(&big_number, (char *)hex_s);
char *big_number_as_decimal = BN_bn2dec(big_number);
free(hex_s);
BN_free(big_number);
BN_CTX_free(bn_ctx);
return big_number_as_decimal;
}
I modified test_compute_prime256v1 as follows:
void test_compute_prime256v1_ex()
{
unsigned char seed_a[20] = {...};
printf("s = %s\n", big_number_as_decimal_from_hex_array_ex(seed_a, 20));
unsigned char p_a[32] = {...};
printf("p = %s\n", big_number_as_decimal_from_hex_array_ex(p_a, 32));
// or
unsigned char seed_a[20] = {...};
printf("s = %s\n", big_number_as_decimal_from_hex_array_ex_2(seed_a, 20));
unsigned char p_a[32] = {...};
printf("p = %s\n", big_number_as_decimal_from_hex_array_ex_2(p_a, 32));
}
but the code produces the same incorrect result in a looped calculation
BN_hex2bn(&big_number, (char *)hex_s); expects a C string as its second argument, i.e. a '\0'-terminated one, since it has no other way to know the size of your string.
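A minimal sketch of that fix (keeping the original signature): allocate one extra byte in char_array_as_hex and terminate the string, so BN_hex2bn stops at the end of your hex digits instead of reading whatever happens to follow on the heap:
unsigned char *char_array_as_hex(unsigned char *chr_a, int len)
{
    /* len * 2 hex digits plus one byte for the terminating '\0' */
    unsigned char *chr_s = (unsigned char *)malloc(len * 2 + 1);
    char buffer[5];
    for (int i = 0; i < len; i++)
    {
        sprintf(buffer, "%02X", chr_a[i]);
        chr_s[(2 * i) + 0] = buffer[0];
        chr_s[(2 * i) + 1] = buffer[1];
    }
    chr_s[len * 2] = '\0';
    return chr_s;
}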

Bit field ordering on Big-Endian (SPARC) processor

Consider the code below:
#include <stdio.h>
#include <stdlib.h>
#define FORCE_CAST(var, type) *(type*)&var
struct processor_status_register
{
unsigned int cwp:5;
unsigned int et:1;
unsigned int ps:1;
unsigned int s:1;
unsigned int pil:4;
unsigned int ef:1;
unsigned int ec:1;
unsigned int reserved:6;
unsigned int c:1;
unsigned int v:1;
unsigned int z:1;
unsigned int n:1;
unsigned int ver:4;
unsigned int impl:4;
}__attribute__ ((__packed__));
struct registers
{
unsigned long* registerSet;
unsigned long* globalRegisters;
unsigned long* cwptr;
unsigned long wim, tbr, y, pc, npc;
unsigned short registerWindows;
/* Though Intel x86 architecture allows un-aligned memory access, SPARC mandates memory accesses to be 8 byte aligned. Without __attribute__ ((aligned (8))) or a preceding dummy byte e.g. unsigned short dummyByte, the code below crashes with a dreaded Bus error and Core dump. For more details, follow the links below:
http://blog.jgc.org/2007/04/debugging-solaris-bus-error-caused-by.html
https://groups.google.com/forum/?fromgroups=#!topic/comp.unix.solaris/8SgFiMudGL4
*/
struct processor_status_register __attribute__ ((aligned (8))) psr;
}__attribute__ ((__packed__));
int getBit(unsigned long bitStream, int position)
{
int bit;
bit = (bitStream & (1 << position)) >> position;
return bit;
}
char* showBits(unsigned long bitStream, int startPosition, int endPosition)
{
// Allocate one extra byte for NULL character
char* bits = (char*)malloc(endPosition - startPosition + 2);
int bitIndex;
for(bitIndex = 0; bitIndex <= endPosition; bitIndex++)
bits[bitIndex] = (getBit(bitStream, endPosition - bitIndex)) ? '1' : '0';
bits[bitIndex] = '\0';
return bits;
}
int main()
{
struct registers sparcRegisters; short isLittleEndian;
// Check for Endianness
unsigned long checkEndian = 0x00000001;
if(*((char*)(&checkEndian)))
{ printf("Little Endian\n"); isLittleEndian = 1; } // Little Endian architecture detected
else
{ printf("Big Endian\n"); isLittleEndian = 0; } // Big Endian architecture detected
unsigned long registerValue = 0xF30010A7;
unsigned long swappedRegisterValue = isLittleEndian ? registerValue : __builtin_bswap32(registerValue);
sparcRegisters.psr = FORCE_CAST(swappedRegisterValue, struct processor_status_register);
registerValue = isLittleEndian ? FORCE_CAST(sparcRegisters.psr, unsigned long) : __builtin_bswap32(FORCE_CAST(sparcRegisters.psr, unsigned long));
printf("\nPSR=0x%0X, IMPL=%u, VER=%u, CWP=%u\n", registerValue, sparcRegisters.psr.impl, sparcRegisters.psr.ver, sparcRegisters.psr.cwp);
printf("PSR=%s\n",showBits(registerValue, 0, 31));
sparcRegisters.psr.cwp = 7;
sparcRegisters.psr.et = 1;
sparcRegisters.psr.ps = 0;
sparcRegisters.psr.s = 1;
sparcRegisters.psr.pil = 0;
sparcRegisters.psr.ef = 0;
sparcRegisters.psr.ec = 0;
sparcRegisters.psr.reserved = 0;
sparcRegisters.psr.c = 0;
sparcRegisters.psr.v = 0;
sparcRegisters.psr.z = 0;
sparcRegisters.psr.n = 0;
sparcRegisters.psr.ver = 3;
sparcRegisters.psr.impl = 0xF;
registerValue = isLittleEndian ? FORCE_CAST(sparcRegisters.psr, unsigned long) : __builtin_bswap32(FORCE_CAST(sparcRegisters.psr, unsigned long));
printf("\nPSR=0x%0X, IMPL=%u, VER=%u, CWP=%u\n", registerValue, sparcRegisters.psr.impl, sparcRegisters.psr.ver, sparcRegisters.psr.cwp);
printf("PSR=%s\n\n",showBits(registerValue, 0, 31));
return 0;
}
I have used gcc-4.7.2 on Solaris 10 on SPARC to compile the code above, producing the Big-Endian output:
Big Endian
PSR=0xF30010A7, IMPL=3, VER=15, CWP=20
PSR=11110011000000000001000010100111
PSR=0x3F00003D, IMPL=15, VER=3, CWP=7
PSR=00111111000000000000000000111101
I have used gcc-4.4 on Ubuntu-10.04 on Intel x86 to compile the code above, producing the Little-Endian output:
Little Endian
PSR=0xF30010A7, IMPL=15, VER=3, CWP=7
PSR=11110011000000000001000010100111
PSR=0xF30000A7, IMPL=15, VER=3, CWP=7
PSR=11110011000000000000000010100111
While the latter is as expected, can anyone please explain the Big-Endian counterpart? Considering the showBits() method to be correct, how can PSR=0x3F00003D give rise to the values IMPL=15, VER=3, CWP=7? How is the bit-field arranged and interpreted in memory on a Big-Endian system?
... PSR=0x3F00003D give rise to IMPL=15, VER=3, CWP=7 values?
It can't. I don't know why you're calling __builtin_bswap32, but 0x3F00003D does not represent the memory of the sparcRegisters struct as you initialized it.
Let's check this code:
sparcRegisters.psr.cwp = 7;
sparcRegisters.psr.et = 1;
sparcRegisters.psr.ps = 0;
sparcRegisters.psr.s = 1;
sparcRegisters.psr.pil = 0;
sparcRegisters.psr.ef = 0;
sparcRegisters.psr.ec = 0;
sparcRegisters.psr.reserved = 0;
sparcRegisters.psr.c = 0;
sparcRegisters.psr.v = 0;
sparcRegisters.psr.z = 0;
sparcRegisters.psr.n = 0;
sparcRegisters.psr.ver = 3;
sparcRegisters.psr.impl = 0xF;
The individual translations are as follows:
7 => 00111
1 => 1
0 => 0
1 => 1
0 => 0000
0 => 0
0 => 0
0 => 000000
0 => 0
0 => 0
0 => 0
0 => 0
3 => 0011
F => 1111
The structure in memory therefore becomes 00111101000000000000000000111111, which is 0x3D00003F in big-endian.
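Grouping those bits into bytes (just the arithmetic spelled out, reading the fields in declaration order):
00111101 00000000 00000000 00111111
  0x3D     0x00     0x00     0x3F   => 0x3D00003F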
You can confirm this with the following code (tested using CC on Solaris):
#include <stdio.h>
#include <string.h>
struct processor_status_register
{
unsigned int cwp:5;
unsigned int et:1;
unsigned int ps:1;
unsigned int s:1;
unsigned int pil:4;
unsigned int ef:1;
unsigned int ec:1;
unsigned int reserved:6;
unsigned int c:1;
unsigned int v:1;
unsigned int z:1;
unsigned int n:1;
unsigned int ver:4;
unsigned int impl:4;
}__attribute__ ((__packed__));
int getBit(unsigned long bitStream, int position)
{
int bit;
bit = (bitStream & (1 << position)) >> position;
return bit;
}
char* showBits(unsigned long bitStream, int startPosition, int endPosition)
{
// Allocate one extra byte for NULL character
static char bits[33];
memset(bits, 0, 33);
int bitIndex;
for(bitIndex = 0; bitIndex <= endPosition; bitIndex++)
{
bits[bitIndex] = (getBit(bitStream, endPosition - bitIndex)) ? '1' : '0';
}
return bits;
}
int main()
{
processor_status_register psr;
psr.cwp = 7;
psr.et = 1;
psr.ps = 0;
psr.s = 1;
psr.pil = 0;
psr.ef = 0;
psr.ec = 0;
psr.reserved = 0;
psr.c = 0;
psr.v = 0;
psr.z = 0;
psr.n = 0;
psr.ver = 3;
psr.impl = 0xF;
unsigned long registerValue = 0;
memcpy(&registerValue, &psr, sizeof(registerValue));
printf("\nPSR=0x%0X, IMPL=%u, VER=%u, CWP=%u\n", registerValue,
psr.impl, psr.ver,
psr.cwp);
printf("PSR=%s\n\n",showBits(registerValue, 0, 31));
return 0;
}
The output of this is:
PSR=0x3D00003F, IMPL=15, VER=3, CWP=7
PSR=00111101000000000000000000111111
