It looks like starting with iOS 10.2, Apple has prevented access to all MAC addresses, not just that of your own device.
However, some apps in the store still seem to manage it, e.g. Fing and Net Analyzer. Are these still working because they were compiled against an older SDK, or do they have special tricks to gather the MAC address?
Can anyone share a work-around to get the MAC addresses for iOS 10.2 devices on WiFi?
This is only test code, just to give an idea of how to get the MAC address. But I am sure Apple will soon close this option.
-(void) jan_mac_addr_test:(const char*) host
{
// struct rt_msghdr, struct sockaddr_inarp and the constants below normally come from
// <net/route.h> and <netinet/if_ether.h>, which the iOS SDK does not ship, so copy
// those definitions from the macOS SDK. Also needs <sys/socket.h>, <arpa/inet.h>,
// <string.h> and <unistd.h>.
#define BUFLEN (sizeof(struct rt_msghdr) + 512)
#define SEQ 9999
#define RTM_VERSION 5         // important, version 2 does not return a MAC address!
#define RTM_GET 0x4           // report metrics
#define RTF_LLINFO 0x400      // generated by link layer (e.g. ARP)
#define RTF_IFSCOPE 0x1000000 // has valid interface scope
#define RTA_DST 0x1           // destination sockaddr present
    int sockfd;
    unsigned char buf[BUFLEN];
    unsigned char buf2[BUFLEN];
    ssize_t n;
    struct rt_msghdr *rtm;
    struct sockaddr_in *sin;

    memset(buf, 0, sizeof(buf));
    memset(buf2, 0, sizeof(buf2));

    sockfd = socket(AF_ROUTE, SOCK_RAW, 0);
    if (sockfd < 0)
        return;

    // build an RTM_GET request for the destination address
    rtm = (struct rt_msghdr *) buf;
    rtm->rtm_msglen = sizeof(struct rt_msghdr) + sizeof(struct sockaddr_in);
    rtm->rtm_version = RTM_VERSION;
    rtm->rtm_type = RTM_GET;
    rtm->rtm_addrs = RTA_DST;
    rtm->rtm_flags = RTF_LLINFO;
    rtm->rtm_pid = 1234;
    rtm->rtm_seq = SEQ;

    sin = (struct sockaddr_in *) (rtm + 1);
    sin->sin_len = sizeof(struct sockaddr_in);
    sin->sin_family = AF_INET;
    sin->sin_addr.s_addr = inet_addr(host);

    write(sockfd, rtm, rtm->rtm_msglen);
    n = read(sockfd, buf2, BUFLEN);
    close(sockfd);

    if (n > 0) { // read() returns -1 on error, so check for a positive length
        // the link-layer (MAC) address follows the reply header and the ARP sockaddr
        int index = sizeof(struct rt_msghdr) + sizeof(struct sockaddr_inarp) + 8;
        // savedata("test", buf2, n);
        NSLog(@"IP %s :: %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x", host,
              buf2[index+0], buf2[index+1], buf2[index+2],
              buf2[index+3], buf2[index+4], buf2[index+5]);
    }
}
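For reference, a minimal call site (the address below is a hypothetical example, not from the original post):
// look up the MAC of another device on the local network by its IPv4 address
[self jan_mac_addr_test:"192.168.1.1"];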
Related
I am validating DPDK receive functionality: I am shooting a pcap externally and
have added code in l2fwd to dump received packets to a pcap. The pcap dumped by l2fwd has all the packets from the shooter, but some of them are not in sequence.
The shooter is already validated.
DPDK version in use: 21.11
Link to the pcap used: https://wiki.wireshark.org/uploads/__moin_import__/attachments/SampleCaptures/tcp-ecn-sample.pcap
The out-of-order packets are random. On the first run I saw no jumbled packets, but I was able to replicate the issue on the second run, with the 2nd, 3rd and 4th packets jumbled into the order 3, 4, 2.
Below is a snippet from the l2fwd example with our modifications marked as //TESTCODE.
/* Read packet from RX queues. 8< */
for (i = 0; i < qconf->n_rx_port; i++) {
portid = qconf->rx_port_list[i];
nb_rx = rte_eth_rx_burst(portid, 0,
pkts_burst, MAX_PKT_BURST);
port_statistics[portid].rx += nb_rx;
for (j = 0; j < nb_rx; j++) {
m = pkts_burst[j];
// TESTCODE_STARTS
uint8_t* pkt = rte_pktmbuf_mtod(m, uint8_t*);
dump_to_pcap(pkt, rte_pktmbuf_pkt_len(m));
// TESTCODE_ENDS
rte_prefetch0(rte_pktmbuf_mtod(m, void *));
l2fwd_simple_forward(m, portid);
}
}
/* >8 End of read packet from RX queues. */
Below is the code for dump_to_pcap:
static int
dump_to_pcap(uint8_t* pkt, int pkt_len)
{
static FILE* fp = NULL;
static int init_file = 0;
if (0 == init_file) {
printf("Creating pcap\n");
char pcap_filename[256] = { 0 };
char Two_pcap_filename[256] = { 0 };
currentDateTime(pcap_filename);
sprintf(Two_pcap_filename,".\\Rx_%d_%s.pcap", 0, pcap_filename);
printf("FileSName to Create: %s\n", Two_pcap_filename);
fp = fopen(Two_pcap_filename, "wb");
if (NULL == fp) {
printf("Unable to open file\n");
fp = NULL;
}
else {
printf("File create success..\n");
init_file = 1;
typedef struct pcap_file_header1 {
unsigned int magic; // a 32-bit "magic number"
unsigned short version_major; //a 16-bit major version number
unsigned short version_minor; //a 16-bit minor version number
unsigned int thiszone; //a 32-bit "time zone offset" field that's not actually used, so you can (and probably should) just make it 0
unsigned int sigfigs; //a 32-bit "time stamp accuracy" field that's not actually used, so you can (and probably should) just make it 0
unsigned int snaplen; //a 32-bit "snapshot length" field
unsigned int linktype; //a 32-bit "link layer type" field
}dumpFileHdr;
dumpFileHdr file_hdr;
file_hdr.magic = 2712847316; //0xa1b2c3d4;
file_hdr.version_major = 2;
file_hdr.version_minor = 4;
file_hdr.thiszone = 0;
file_hdr.sigfigs = 0;
file_hdr.snaplen = 65535;
file_hdr.linktype = 1;
fwrite((void*)(&file_hdr), sizeof(dumpFileHdr), 1, fp);
//printf("Pcap Header written\n");
}
}
typedef struct pcap_pkthdr1 {
unsigned int ts_sec; /* time stamp */
unsigned int ts_usec;
unsigned int caplen; /* length of portion present */
unsigned int len; /* length this packet (off wire) */
}dumpPktHdr;
dumpPktHdr pkt_hdr;
static int ts_sec = 1;
pkt_hdr.ts_sec = ts_sec++;
pkt_hdr.ts_usec = 0;
pkt_hdr.caplen = pkt_hdr.len = pkt_len;
if (NULL != fp) {
fwrite((void*)(&pkt_hdr), sizeof(dumpPktHdr), 1, fp);
fwrite((void*)(pkt), pkt_len, 1, fp);
fflush(fp);
}
return 0;
}
I would like to make the compilation and linking of my code projects using OpenCL on OS X as automatic as possible. I know how to do it for C++, but I am experiencing problems with OpenCL. This is the code that I am using as an example:
main.cpp:
#include <stdio.h>
#include <stdlib.h>
#ifdef __APPLE__ //Mac OSX has a different name for the header file
#include <OpenCL/opencl.h>
#else
#include <CL/cl.h>
#endif
#define MEM_SIZE (128)//suppose we have a vector with 128 elements
#define MAX_SOURCE_SIZE (0x100000)
int main()
{
//In general Intel CPU and NV/AMD's GPU are in different platforms
//But in Mac OSX, all the OpenCL devices are in the platform "Apple"
cl_platform_id platform_id = NULL;
cl_device_id device_id = NULL;
cl_context context = NULL;
cl_command_queue command_queue = NULL; //"stream" in CUDA
cl_mem memobj = NULL;//device memory
cl_program program = NULL; //cl_program is a program executable created from the source or binary
cl_kernel kernel = NULL; //kernel function
cl_uint ret_num_devices;
cl_uint ret_num_platforms;
cl_int ret; //accepts return values for APIs
float mem[MEM_SIZE]; //alloc memory on host(CPU) ram
//OpenCL source can be placed in the source code as text strings or read from another file.
FILE *fp;
const char fileName[] = "./kernel.cl";
size_t source_size;
char *source_str;
cl_int i;
// read the kernel file into ram
fp = fopen(fileName, "r");
if (!fp) {
fprintf(stderr, "Failed to load kernel.\n");
exit(1);
}
source_str = (char *)malloc(MAX_SOURCE_SIZE);
source_size = fread( source_str, 1, MAX_SOURCE_SIZE, fp );
fclose( fp );
//initialize the mem with 1,2,3...,n
for( i = 0; i < MEM_SIZE; i++ ) {
mem[i] = i;
}
//get the device info
ret = clGetPlatformIDs(1, &platform_id, &ret_num_platforms);
ret = clGetDeviceIDs(platform_id, CL_DEVICE_TYPE_DEFAULT, 1, &device_id, &ret_num_devices);
//create context on the specified device
context = clCreateContext( NULL, 1, &device_id, NULL, NULL, &ret);
//create the command_queue (stream)
command_queue = clCreateCommandQueue(context, device_id, 0, &ret);
//alloc mem on the device with the read/write flag
memobj = clCreateBuffer(context, CL_MEM_READ_WRITE, MEM_SIZE * sizeof(float), NULL, &ret);
//copy the memory from host to device, CL_TRUE means blocking write/read
ret = clEnqueueWriteBuffer(command_queue, memobj, CL_TRUE, 0, MEM_SIZE * sizeof(float), mem, 0, NULL, NULL);
//create a program object for a context
//load the source code specified by the text strings into the program object
program = clCreateProgramWithSource(context, 1, (const char **)&source_str, (const size_t *)&source_size, &ret);
//build (compiles and links) a program executable from the program source or binary
ret = clBuildProgram(program, 1, &device_id, NULL, NULL, NULL);
//create a kernel object with specified name
kernel = clCreateKernel(program, "vecAdd", &ret);
//set the argument value for a specific argument of a kernel
ret = clSetKernelArg(kernel, 0, sizeof(cl_mem), (void *)&memobj);
//define the global size and local size (grid size and block size in CUDA)
size_t global_work_size[3] = {MEM_SIZE, 0, 0};
size_t local_work_size[3] = {MEM_SIZE, 0, 0};
//Enqueue a command to execute a kernel on a device ("1" indicates 1-dim work)
ret = clEnqueueNDRangeKernel(command_queue, kernel, 1, NULL, global_work_size, local_work_size, 0, NULL, NULL);
//copy memory from device to host
ret = clEnqueueReadBuffer(command_queue, memobj, CL_TRUE, 0, MEM_SIZE * sizeof(float), mem, 0, NULL, NULL);
//print out the result
for(i=0; i<MEM_SIZE; i++) {
printf("mem[%d] : %.2f\n", i, mem[i]);
}
//clFlush only guarantees that all queued commands to command_queue get issued to the appropriate device
//There is no guarantee that they will be complete after clFlush returns
ret = clFlush(command_queue);
//clFinish blocks until all previously queued OpenCL commands in command_queue are issued to the associated device and have completed.
ret = clFinish(command_queue);
ret = clReleaseKernel(kernel);
ret = clReleaseProgram(program);
ret = clReleaseMemObject(memobj);//free memory on device
ret = clReleaseCommandQueue(command_queue);
ret = clReleaseContext(context);
free(source_str);//free memory on host
return 0;
}
kernel.cl:
__kernel void vecAdd(__global float* a)
{
int gid = get_global_id(0);// in CUDA = blockIdx.x * blockDim.x + threadIdx.x
a[gid] += a[gid];
}
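For reference: since vecAdd adds each element to itself, a correct run should print each mem[i] as twice its index (0.00, 2.00, 4.00, and so on).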
and this is my CMakeLists.txt so far:
#Minimal OpenCL CMakeLists.txt by StreamHPC
cmake_minimum_required (VERSION 3.1)
project(GreatProject)
# Handle OpenCL
find_package(OpenCL REQUIRED)
include_directories(${OpenCL_INCLUDE_DIRS})
link_directories(${OpenCL_LIBRARY})
add_executable (main main.cpp)
target_include_directories (main PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})
target_link_libraries (main ${OpenCL_LIBRARY})
Apparently it compiles, but when I run the executable I get the error:
Failed to load kernel.
I successfully compiled the code by hand following this answer, but my project will have various kernels and various C++ files and headers, so I would like to use CMake to automate the compilation of the project.
How should I modify my CMakeLists.txt script?
NOTE:
I guess that the file kernel.cl is not being handled at all, and I don't know a proper way to write a CMakeLists.txt that always picks up all the *.cl files in the project directory in addition to all the *.cpp files. It would be even better if it were possible to link against MKL.
On Mac, OpenCL is provided as a framework; you need to do the following to link against it:
cmake_minimum_required (VERSION 2.6)
project (montecarlo_cl)
find_package(OpenCL REQUIRED)
include_directories( ${OpenCL_INCLUDE_DIRS})
set (montecarlo_cl_src montecarlo_ocl.c)
add_executable (montecarlo_cl ${montecarlo_cl_src})
target_link_libraries(montecarlo_cl "-framework OpenCL" )
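Note that the "Failed to load kernel." message comes from the fopen("./kernel.cl") call in main.cpp, so the kernel file also has to be present in the directory the executable runs from. A minimal sketch of one way to do that (my addition, assuming the program is run from the build directory):
# copy the OpenCL kernel source next to the built executable
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/kernel.cl ${CMAKE_CURRENT_BINARY_DIR}/kernel.cl COPYONLY)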
Everyone, I need help...
I'm trying to recover the MAC address following these links:
Getting ARP table on iPhone/iPad
How do I query the ARP table on iPhone?
My test was correct only once.
My code was not changed; I only included CocoaPods for AFNetworking.
When I tested again, it always fails: the MAC is always nil, and my IP is correct.
My code is:
const char *c = [ipAddress UTF8String];
u_long addr = inet_addr(c);
NSString *mac = [self ip2mac:addr ];
- (NSString*)ip2mac:(in_addr_t)addr{
NSString *ret = nil;
size_t needed;
char *buf, *next;
struct rt_msghdr *rtm;
struct sockaddr_inarp *sin;
struct sockaddr_dl *sdl;
int mib[6];
mib[0] = CTL_NET;
mib[1] = PF_ROUTE;
mib[2] = 0;
mib[3] = AF_INET;
mib[4] = NET_RT_FLAGS;
mib[5] = RTF_LLINFO;
if (sysctl(mib, sizeof(mib) / sizeof(mib[0]), NULL, &needed, NULL, 0) < 0)
err(1, "route-sysctl-estimate");
if ((buf = (char*)malloc(needed)) == NULL)
err(1, "malloc");
if (sysctl(mib, sizeof(mib) / sizeof(mib[0]), buf, &needed, NULL, 0) < 0)
err(1, "retrieval of routing table");
for (next = buf; next < buf + needed; next += rtm->rtm_msglen) {
rtm = (struct rt_msghdr *)next;
sin = (struct sockaddr_inarp *)(rtm + 1);
sdl = (struct sockaddr_dl *)(sin + 1);
if (addr != sin->sin_addr.s_addr || sdl->sdl_alen < 6)
continue;
u_char *cp = (u_char*)LLADDR(sdl);
ret = [NSString stringWithFormat:@"%02X:%02X:%02X:%02X:%02X:%02X",
cp[0], cp[1], cp[2], cp[3], cp[4], cp[5]];
break;
}
free(buf);
return ret;
}
The loop is only entered once:
for (next = buf; next < buf + needed; next += rtm->rtm_msglen)
and the size "needed" is always 128.
If I put the assignment
ret = [NSString stringWithFormat:@"%02X:%02X:%02X:%02X:%02X:%02X",
cp[0], cp[1], cp[2], cp[3], cp[4], cp[5]];
before
if (addr != sin->sin_addr.s_addr || sdl->sdl_alen < 6)
continue;
it returns an incorrect MAC value; it is wrong, as I have no device with that MAC.
Thanks, everyone.
I found the solution: change
u_long addr = inet_addr(c);
to
in_addr_t addr = inet_addr(c);
(On 64-bit iOS, u_long is 8 bytes while in_addr_t is only 4, which appears to be what broke the address comparison.)
This was changed as of iOS 8 so that the MAC address was no longer accessible.
How could I guarantee the integrity of the code of an iOS app? I've been taking a look at Apple's Security Overview document; would code signing be enough? Is there any other recommended mechanism to guarantee code integrity?
Thanks in advance
I had the same problem. This is easy on OS X but somewhat difficult on iOS, because iOS doesn't have an API like SecStaticCodeCheckValidity.
There are two load commands in a Mach-O binary that you can use to ensure the integrity of the app.
LC_ENCRYPTION_INFO
LC_CODE_SIGNATURE
1. LC_ENCRYPTION_INFO
First, LC_ENCRYPTION_INFO stores information about App Store encryption. Once an app is uploaded to the App Store, it is encrypted before being released to users.
Binary before uploading to the App Store (or decrypted):
otool -l [binary] | grep LC_ENCRYPTION_INFO -A5
cmd LC_ENCRYPTION_INFO
cmdsize 20
cryptoff 16384
cryptsize 5783552
cryptid 0
--
cmd LC_ENCRYPTION_INFO_64
cmdsize 24
cryptoff 16384
cryptsize 6635520
cryptid 0
pad 0
Binary after uploading to the App Store (encrypted):
otool -l [binary] | grep LC_ENCRYPTION_INFO -A5
cmd LC_ENCRYPTION_INFO
cmdsize 20
cryptoff 16384
cryptsize 5783552
cryptid 1
--
cmd LC_ENCRYPTION_INFO_64
cmdsize 24
cryptoff 16384
cryptsize 6635520
cryptid 1
pad 0
As you can see, 'cryptid' is set to 1 when the app is uploaded, so checking the 'cryptid' bit will tell us whether the binary is encrypted or not.
You may think this can be bypassed easily by just setting the bit to 1, but then the OS will try to decrypt the binary, which will turn the code into unrecognizable bytes.
// requires <mach-o/loader.h> and <mach-o/fat.h>, plus Foundation for NSBundle/NSData
bool isBinaryEncrypted()
{
// checking current binary's LC_ENCRYPTION_INFO
const void *binaryBase;
struct load_command *machoCmd = NULL; // initialized so the command loop is skipped if no magic matches
const struct mach_header *machoHeader;
NSString *path = [[NSBundle mainBundle] executablePath];
NSData *filedata = [NSData dataWithContentsOfFile:path];
binaryBase = (char *)[filedata bytes];
machoHeader = (const struct mach_header *) binaryBase;
if(machoHeader->magic == FAT_CIGAM)
{
unsigned int offset = 0;
struct fat_arch *fatArch = (struct fat_arch *)((struct fat_header *)machoHeader + 1);
struct fat_header *fatHeader = (struct fat_header *)machoHeader;
for(uint32_t i = 0; i < ntohl(fatHeader->nfat_arch); i++)
{
if(sizeof(int *) == 4 && !(ntohl(fatArch->cputype) & CPU_ARCH_ABI64)) // check 32bit section for 32bit architecture
{
offset = ntohl(fatArch->offset);
break;
}
else if(sizeof(int *) == 8 && (ntohl(fatArch->cputype) & CPU_ARCH_ABI64)) // and 64bit section for 64bit architecture
{
offset = ntohl(fatArch->offset);
break;
}
fatArch = (struct fat_arch *)((uint8_t *)fatArch + sizeof(struct fat_arch));
}
machoHeader = (const struct mach_header *)((uint8_t *)machoHeader + offset);
}
if(machoHeader->magic == MH_MAGIC) // 32bit
{
machoCmd = (struct load_command *)((struct mach_header *)machoHeader + 1);
}
else if(machoHeader->magic == MH_MAGIC_64) // 64bit
{
machoCmd = (struct load_command *)((struct mach_header_64 *)machoHeader + 1);
}
for(uint32_t i=0; i < machoHeader->ncmds && machoCmd != NULL; i++){
if(machoCmd->cmd == LC_ENCRYPTION_INFO)
{
struct encryption_info_command *cryptCmd = (struct encryption_info_command *) machoCmd;
return cryptCmd->cryptid;
}
if(machoCmd->cmd == LC_ENCRYPTION_INFO_64)
{
struct encryption_info_command_64 *cryptCmd = (struct encryption_info_command_64 *) machoCmd;
return cryptCmd->cryptid;
}
machoCmd = (struct load_command *)((uint8_t *)machoCmd + machoCmd->cmdsize);
}
return FALSE; // couldn't find cryptcmd
}
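A minimal call site for the check above (my addition): on a device, a production App Store build should be encrypted, so a FALSE result for such a build is suspicious.
if (!isBinaryEncrypted()) {
    // cryptid is 0: the binary is not App Store encrypted, which for a
    // production build may indicate a decrypted or tampered copy
}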
2. LC_CODE_SIGNATURE
LC_CODE_SIGNATURE is the load command that /usr/bin/codesign actually refers to when checking the validity of the binary. But parsing it is a little more difficult than parsing LC_ENCRYPTION_INFO, because it's undocumented and there are no types like signature_info_command.
LC_CODE_SIGNATURE contains hashes of all of the binary except the section itself, and the hashes are adjusted whenever the binary is re-signed.
I ported the code of /usr/bin/codesign to parse this section. Check here and SecStaticCode::validateExecutable defined in here.
CodeSigning.h
#ifndef CodeSigning_h
#define CodeSigning_h
#include <stdio.h>
// codes from https://opensource.apple.com/source/Security/Security-55179.1/libsecurity_codesigning/lib/cscdefs.h
enum {
CSMAGIC_REQUIREMENT = 0xfade0c00, /* single Requirement blob */
CSMAGIC_REQUIREMENTS = 0xfade0c01, /* Requirements vector (internal requirements) */
CSMAGIC_CODEDIRECTORY = 0xfade0c02, /* CodeDirectory blob */
CSMAGIC_EMBEDDED_SIGNATURE = 0xfade0cc0, /* embedded form of signature data */
CSMAGIC_DETACHED_SIGNATURE = 0xfade0cc1, /* multi-arch collection of embedded signatures */
CSSLOT_CODEDIRECTORY = 0, /* slot index for CodeDirectory */
};
/*
* Structure of an embedded-signature SuperBlob
*/
typedef struct __BlobIndex {
uint32_t type; /* type of entry */
uint32_t offset; /* offset of entry */
} CS_BlobIndex;
typedef struct __SuperBlob {
uint32_t magic; /* magic number */
uint32_t length; /* total length of SuperBlob */
uint32_t count; /* number of index entries following */
CS_BlobIndex index[]; /* (count) entries */
/* followed by Blobs in no particular order as indicated by offsets in index */
} CS_SuperBlob;
/*
* C form of a CodeDirectory.
*/
typedef struct __CodeDirectory {
uint32_t magic; /* magic number (CSMAGIC_CODEDIRECTORY) */
uint32_t length; /* total length of CodeDirectory blob */
uint32_t version; /* compatibility version */
uint32_t flags; /* setup and mode flags */
uint32_t hashOffset; /* offset of hash slot element at index zero */
uint32_t identOffset; /* offset of identifier string */
uint32_t nSpecialSlots; /* number of special hash slots */
uint32_t nCodeSlots; /* number of ordinary (code) hash slots */
uint32_t codeLimit; /* limit to main image signature range */
uint8_t hashSize; /* size of each hash in bytes */
uint8_t hashType; /* type of hash (cdHashType* constants) */
uint8_t spare1; /* unused (must be zero) */
uint8_t pageSize; /* log2(page size in bytes); 0 => infinite */
uint32_t spare2; /* unused (must be zero) */
/* followed by dynamic content as located by offset fields above */
} CS_CodeDirectory;
static inline const CS_CodeDirectory *findCodeDirectory(const CS_SuperBlob *embedded)
{
if (embedded && ntohl(embedded->magic) == CSMAGIC_EMBEDDED_SIGNATURE) {
const CS_BlobIndex *limit = &embedded->index[ntohl(embedded->count)];
const CS_BlobIndex *p;
for (p = embedded->index; p < limit; ++p)
if (ntohl(p->type) == CSSLOT_CODEDIRECTORY) {
const unsigned char *base = (const unsigned char *)embedded;
const CS_CodeDirectory *cd = (const CS_CodeDirectory *)(base + ntohl(p->offset));
if (ntohl(cd->magic) == CSMAGIC_CODEDIRECTORY){
return cd;
}
else{
break;
}
}
}
// not found
return NULL;
}
//
unsigned char validateSlot(const void *data, size_t length, size_t slot, const CS_CodeDirectory *codeDirectory);
#endif /* CodeSigning_h */
CodeSigning.c
#include "CodeSigning.h"
#include <stdio.h>
#include <string.h>
#import <CommonCrypto/CommonDigest.h>
unsigned char validateSlot(const void *data, size_t length, size_t slot, const CS_CodeDirectory *codeDirectory)
{
uint8_t digest[CC_SHA1_DIGEST_LENGTH + 1] = {0, };
CC_SHA1(data, (CC_LONG)length, digest);
return (memcmp(digest, (void *)((char *)codeDirectory + ntohl(codeDirectory->hashOffset) + 20*slot), 20) == 0);
}
Parsing the section:
// requires <mach-o/loader.h> and <mach-o/fat.h>, plus <sys/param.h> for MIN
void checkCodeSignature(void *binaryContent){
struct load_command *machoCmd = NULL; // initialized so the command loop is skipped if no magic matches
const struct mach_header *machoHeader;
machoHeader = (const struct mach_header *) binaryContent;
if(machoHeader->magic == FAT_CIGAM){
unsigned int offset = 0;
struct fat_arch *fatArch = (struct fat_arch *)((struct fat_header *)machoHeader + 1);
struct fat_header *fatHeader = (struct fat_header *)machoHeader;
for(uint32_t i = 0; i < ntohl(fatHeader->nfat_arch); i++)
{
if(sizeof(int *) == 4 && !(ntohl(fatArch->cputype) & CPU_ARCH_ABI64)) // check 32bit section for 32bit architecture
{
offset = ntohl(fatArch->offset);
break;
}
else if(sizeof(int *) == 8 && (ntohl(fatArch->cputype) & CPU_ARCH_ABI64)) // and 64bit section for 64bit architecture
{
offset = ntohl(fatArch->offset);
break;
}
fatArch = (struct fat_arch *)((uint8_t *)fatArch + sizeof(struct fat_arch));
}
machoHeader = (const struct mach_header *)((uint8_t *)machoHeader + offset);
}
if(machoHeader->magic == MH_MAGIC) // 32bit
{
machoCmd = (struct load_command *)((struct mach_header *)machoHeader + 1);
}
else if(machoHeader->magic == MH_MAGIC_64) // 64bit
{
machoCmd = (struct load_command *)((struct mach_header_64 *)machoHeader + 1);
}
for(uint32_t i=0; i < machoHeader->ncmds && machoCmd != NULL; i++){
if(machoCmd->cmd == LC_CODE_SIGNATURE)
{
struct linkedit_data_command *codeSigCmd = (struct linkedit_data_command *) machoCmd;
const CS_SuperBlob *codeEmbedded = (const CS_SuperBlob *)&((char *)machoHeader)[codeSigCmd->dataoff];
void *binaryBase = (void *)machoHeader;
const CS_BlobIndex curIndex = codeEmbedded->index[0];
const CS_CodeDirectory *codeDirectory = (const CS_CodeDirectory *)((char *)codeEmbedded + ntohl(curIndex.offset));
size_t pageSize = codeDirectory->pageSize ? (1 << codeDirectory->pageSize) : 0;
size_t remaining = ntohl(codeDirectory->codeLimit);
size_t processed = 0;
for(size_t slot = 0; slot < ntohl(codeDirectory->nCodeSlots); ++slot){
size_t size = MIN(remaining, pageSize);
if(!validateSlot((char *)binaryBase + processed, size, slot, codeDirectory)){ // cast avoids void* arithmetic
return;
}
processed += size;
remaining -= size;
}
printf("[*] Code is valid!");
}
}
machoCmd = (struct load_command *)((uint8_t *)machoCmd + machoCmd->cmdsize);
}
I have the following code
int ParseData(unsigned char *packet, int len)
{
struct ethhdr *ethernet_header;
struct iphdr *ip_header;
struct tcphdr *tcp_header;
unsigned char *data;
int data_len;
/* Check if any data is there */
if(len > (sizeof(struct ethhdr) + sizeof(struct iphdr) + sizeof(struct tcphdr)))
{
ip_header = (struct iphdr*)(packet + sizeof(struct ethhdr));
data = (packet + sizeof(struct ethhdr) + ip_header->ihl*4 + sizeof(struct tcphdr));
data_len = ntohs(ip_header->tot_len) - ip_header->ihl*4 - sizeof(struct tcphdr);
if(data_len)
{
printf("Data Len : %d\n", data_len);
PrintData("Data : ", data, data_len);
printf("\n\n");
return 1;
}
else
{
printf("No Data in packet\n");
return 0;
}
}
return 0; /* packet too short to contain Ethernet + IP + TCP headers */
}
I am trying to print in ASCII the payload and with a simple function like this
void PrintData(char *mesg, unsigned char *p, int len) /* needs <ctype.h> for isprint() */
{
printf("%s", mesg); /* print the label; don't pass variable data as the format string */
while(len--)
{
if(isprint(*p))
printf("%c", *p);
else
printf(".");
p++;
}
}
The code looks good: no compile problems or warnings. The problem is that the first payload
character is not printed at position 0, but 12 bytes later.
I thought that all the "len" bytes were exactly the data I have to print.
My data point at
data = (packet + sizeof(struct ethhdr) + ip_header->ihl*4 + sizeof(struct tcphdr));
however, data[0] is not printable. What is the problem? Am I missing something? Do I have to check for the TCP options part, maybe?
Thanks
That's right: adding sizeof(struct tcphdr) is only going to get you past the fixed header, not the options. To get to the actual data, you should use the 'data offset' field from the TCP header. The offset is counted from the start of the TCP header in 4-byte units, e.g. if the offset is 8 then the header + options length is 32 bytes.
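A minimal sketch of that fix against the code above (assuming the Linux-style struct tcphdr, whose doff field holds the data offset):
/* use the TCP data offset (in 4-byte words) instead of sizeof(struct tcphdr),
   so that any options are skipped as well */
tcp_header = (struct tcphdr*)(packet + sizeof(struct ethhdr) + ip_header->ihl*4);
data = (unsigned char*)tcp_header + tcp_header->doff*4;
data_len = ntohs(ip_header->tot_len) - ip_header->ihl*4 - tcp_header->doff*4;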