How to get a C char * string value in Swift? (iOS)

char* iperf_get_test_json_output_string ( struct iperf_test* ipt ); // .h file
I am using the iperf SDK's .c files in an iOS project with Swift. The function above returns a (char *) pointer, and I can obtain that pointer reference in Swift. Can anyone help me get the string value out of it using this function?
var result = iperf_get_test_json_output_string(test)
let temp = withUnsafePointer(to: result) {
$0.withMemoryRebound(to: UInt8.self, capacity: MemoryLayout.size(ofValue: $0)) {
String(cString: $0)
}
}
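Note that the attempt above rebinds the bytes of the pointer variable itself, not the C string it points to. Since the C function is imported into Swift as returning an optional C-string pointer (UnsafeMutablePointer<CChar>?), it can usually be passed straight to String(cString:). A minimal sketch, assuming test is an initialized iperf test handle created elsewhere:

// Minimal sketch, assuming `test` is a valid iperf test handle.
// String(cString:) copies the NUL-terminated C string into a Swift String.
var json: String?
if let cString = iperf_get_test_json_output_string(test) {
    json = String(cString: cString)
}
print(json ?? "no JSON output")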

Related

How to "talk" with USB device via my module?

I've written a kernel module for a gamepad that is plugged in via USB. I've already checked that my installed module detects the device successfully and even calls the "probe" method:
static struct usb_driver driver =
{
.name = "Driver#1",
.id_table = table,
.probe = probe,
.disconnect = disconnect,
};
I have also written the URB setup that should "talk" with my device:
usb_fill_int_urb(j->irq_in, udev,
usb_rcvintpipe(udev, ep_irq_in->bEndpointAddress), //!
j->idata, 32, irq_hh,
j, ep_irq_in->bInterval);
But I cannot understand how the irq_in action is supposed to be triggered. I presume that I need to press something and some information should then be written into j->idata. I also did
usb_submit_urb(j->irq_in, GFP_KERNEL);
But nothing happens! Why?
My completion handler and probe function:
static void irq_hh(struct urb *urb)
{
struct devicec *j = urb->context; /* Which of all the structures should be filled in after the data is received??? */
char *data = j->idata;
printk(KERN_INFO "irq_hh: %s", data);
}
static int probe(struct usb_interface *intf, const struct usb_device_id *id)
{
struct usb_device *udev; //!
struct usb_endpoint_descriptor *ep_irq_in;
int ep_irq_in_idx, error;
printk(KERN_INFO "probe...%X",id->idVendor);
j = kzalloc(sizeof(struct devicec), GFP_KERNEL);
udev = interface_to_usbdev(intf);
j->irq_in = usb_alloc_urb(0, GFP_KERNEL);
//--------------------------------------
j->udev = udev;
j->intf = intf;
//--------------------------------------
usb_make_path(udev, j->phys, sizeof(j->phys));
strlcat(j->phys, "/input101", sizeof(j->phys));
printk(KERN_INFO "%s",j->phys);
ep_irq_in_idx = 0;
ep_irq_in = &intf->cur_altsetting->endpoint[ep_irq_in_idx].desc;
usb_fill_int_urb(j->irq_in, udev,
usb_rcvintpipe(udev, ep_irq_in->bEndpointAddress), //!
j->idata, 32, irq_hh,
j, ep_irq_in->bInterval);
j->irq_in->transfer_dma = j->idata_dma;
j->irq_in->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
error = usb_submit_urb(j->irq_in, GFP_KERNEL);
printk(KERN_INFO "%d",error);
return 0;
}

iOS: How to specify DNS to be used to resolve hostname to IP address?

As the title says, I have a hostname (e.g. www.example.com) that I want to resolve using a specified DNS server. For example, in one case I want to use Google's IPv4 DNS and in another case Google's IPv6 DNS.
I have browsed SO for something like this on iOS and found questions like this one (Swift - Get device's IP Address), so I am sure it can be done, but I am unclear on how.
How can I do this?
EDIT 06/07/2018
@mdeora suggested the solution from http://www.software7.com/blog/programmatically-query-specific-dns-servers-on-ios/
This solution works, but only if I use an IPv4 DNS server, for example Google's 8.8.8.8. If I try to use the IPv6 DNS server 2001:4860:4860::8888, I get nothing.
I have managed to change the conversion:
void setup_dns_server(res_state res, const char *dns_server)
{
res_ninit(res);
struct in_addr addr;
// int returnValue = inet_aton(dns_server, &addr);
inet_pton(AF_INET6, dns_server, &addr); // for IPv6 conversion
res->nsaddr_list[0].sin_addr = addr;
res->nsaddr_list[0].sin_family = AF_INET;
res->nsaddr_list[0].sin_port = htons(NS_DEFAULTPORT);
res->nscount = 1;
};
But I still have trouble with this:
void query_ip(res_state res, const char *host, char ip[])
{
u_char answer[NS_PACKETSZ];//NS_IN6ADDRSZ NS_PACKETSZ
int len = res_nquery(res, host, ns_c_in, ns_t_a, answer, sizeof(answer));
ns_msg handle;
ns_initparse(answer, len, &handle);
if(ns_msg_count(handle, ns_s_an) > 0) {
ns_rr rr;
if(ns_parserr(&handle, ns_s_an, 0, &rr) == 0) {
strcpy(ip, inet_ntoa(*(struct in_addr *)ns_rr_rdata(rr)));
}
}
}
I get -1 for len. From what I gather it seems I need to configure res_state for IPv6.
Here is the code from my blog post, which was already mentioned above, slightly adapted to use IPv6.
Adapt setup_dns_server
First we could start with the changes to setup_dns_server:
void setup_dns_server(res_state res, const char *dns_server) {
struct in6_addr addr;
inet_pton(AF_INET6, dns_server, &addr);
res->_u._ext.ext->nsaddrs[0].sin6.sin6_addr = addr;
res->_u._ext.ext->nsaddrs[0].sin6.sin6_family = AF_INET6;
res->_u._ext.ext->nsaddrs[0].sin6.sin6_port = htons(NS_DEFAULTPORT);
res->nscount = 1;
}
Add __res_state_ext
This wouldn't compile because of a missing struct __res_state_ext. This structure is unfortunately in a private header file.
But its definition can be taken from here:
https://opensource.apple.com/source/libresolv/libresolv-65/res_private.h.auto.html :
struct __res_state_ext {
union res_sockaddr_union nsaddrs[MAXNS];
struct sort_list {
int af;
union {
struct in_addr ina;
struct in6_addr in6a;
} addr, mask;
} sort_list[MAXRESOLVSORT];
char nsuffix[64];
char bsuffix[64];
char nsuffix2[64];
};
The struct can be added e.g. at the top of the file.
Adapt resolveHost
The changes here include the longer buffer for ip (INET6_ADDRSTRLEN). res_ninit has moved from setup_dns_server into this method and is now matched with a res_ndestroy.
+ (NSString *)resolveHost:(NSString *)host usingDNSServer:(NSString *)dnsServer {
struct __res_state res;
char ip[INET6_ADDRSTRLEN];
memset(ip, '\0', sizeof(ip));
res_ninit(&res);
setup_dns_server(&res, [dnsServer cStringUsingEncoding:NSASCIIStringEncoding]);
query_ip(&res, [host cStringUsingEncoding:NSUTF8StringEncoding], ip);
res_ndestroy(&res);
return [[NSString alloc] initWithCString:ip encoding:NSASCIIStringEncoding];
}
Retrieving IPv6 addresses
The changes above are already sufficient if you just want to use an IPv6 address for your DNS server. So no changes are necessary in query_ip if you still want to retrieve IPv4 addresses.
In case you would like to retrieve IPv6 addresses from the DNS server as well, you can do this:
void query_ip(res_state res, const char *host, char ip[]) {
u_char answer[NS_PACKETSZ];
int len = res_nquery(res, host, ns_c_in, ns_t_aaaa, answer, sizeof(answer));
ns_msg handle;
ns_initparse(answer, len, &handle);
if(ns_msg_count(handle, ns_s_an) > 0) {
ns_rr rr;
if(ns_parserr(&handle, ns_s_an, 0, &rr) == 0) {
inet_ntop(AF_INET6, ns_rr_rdata(rr), ip, INET6_ADDRSTRLEN);
}
}
}
Please note: we use ns_t_aaaa here to get AAAA resource records (quad-A records), because in DNS these specify the mapping between a hostname and an IPv6 address. Many hosts have no such quad-A record, meaning you can only reach them via IPv4.
Call
You would call it e.g. like so:
NSString *resolved = [ResolveUtil resolveHost:@"www.google.com" usingDNSServer:@"2001:4860:4860::8888"];
NSLog(@"%@", resolved);
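Calling it from Swift looks similar; a minimal sketch, assuming ResolveUtil is exposed through the project's bridging header:

// Hypothetical Swift call, assuming ResolveUtil is visible via the bridging header.
let resolved = ResolveUtil.resolveHost("www.google.com", usingDNSServer: "2001:4860:4860::8888")
print(resolved)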
Disclaimer
These are just simple example calls that demonstrate the basic usage of the functions. There is no error handling.
You can do this using the Swift code below. (Note that Process is available on macOS but not in the iOS SDK, so this approach won't run on an iOS device.)
import Foundation
let task = Process()
task.launchPath = "/usr/bin/env"
task.arguments = ["dig", "@8.8.8.8", "google.com"]
let pipe = Pipe()
task.standardOutput = pipe
task.launch()
let data = pipe.fileHandleForReading.readDataToEndOfFile()
let output = NSString(data: data, encoding: String.Encoding.utf8.rawValue)
print(output!)
In the code above, use the DNS server of your choice by replacing 8.8.8.8.
For Objective-C on iOS, refer to the link below:
https://www.software7.com/blog/programmatically-query-specific-dns-servers-on-ios/
Below is the revised code for setting up the DNS:
void setup_dns_server(res_state res, const char *dns_server)
{
res_ninit(res);
struct in6_addr addr;
// int returnValue = inet_aton(dns_server, &addr);
inet_pton(AF_INET6, dns_server, &addr); // for IPv6 conversion
res->nsaddr_list[0].sin_addr = addr;
res->nsaddr_list[0].sin_family = AF_INET6;
res->nsaddr_list[0].sin_port = htons(NS_DEFAULTPORT);
res->nscount = 1;
};
And the query code -
void query_ip(res_state res, const char *host, char ip[])
{
u_char answer[NS_PACKETSZ];//NS_IN6ADDRSZ NS_PACKETSZ
int len = res_nquery(res, host, ns_c_in, ns_t_aaaa, answer, sizeof(answer)); // AAAA, since the rdata is parsed as an IPv6 address below
ns_msg handle;
ns_initparse(answer, len, &handle);
if(ns_msg_count(handle, ns_s_an) > 0) {
ns_rr rr;
if(ns_parserr(&handle, ns_s_an, 0, &rr) == 0) {
inet_ntop(AF_INET6, ns_rr_rdata(rr), ip, INET6_ADDRSTRLEN); // inet_ntoa() handles IPv4 only
}
}
}
PS: I have not been able to test it, but it should work for IPv6 DNS.

Read UInt32 from InputStream

I need to communicate with a server that uses a special message format: each message begins with 4 bytes (together an unsigned long / UInt32 in big-endian format) that give the length of the message that follows. After those 4 bytes, the message is sent as a normal string.
So I first need to read the 4 bytes into an integer (32-bit unsigned). In Java I do it like this:
DataInputStream is;
...
int len = is.readInt();
How can I do this in Swift 4?
At the moment I use
var lengthbuffer = [UInt8](repeating: 0, count: 4)
let bytecount = istr.read(&lengthbuffer, maxLength: 4)
let lengthbytes = lengthbuffer[0...3]
let bigEndianValue = lengthbytes.withUnsafeBufferPointer {
($0.baseAddress!.withMemoryRebound(to: UInt32.self, capacity: 1) { $0 })
}.pointee
let bytes_expected = Int(UInt32(bigEndian: bigEndianValue))
But this does not look like the most elegant way. Furthermore, sometimes (I cannot reproduce it reliably) a wrong value is read (too big). When I then try to allocate memory for the following message, the app crashes:
let buffer = UnsafeMutablePointer<UInt8>.allocate(capacity: bytes_expected)
let bytes_read = istr.read(buffer, maxLength: bytes_expected)
So what is the Swift way to read a UInt32 from an InputStream?
EDIT:
My current code (which implements suggestions from the comments, thanks!) looks like this:
private let inputStreamAccessQueue = DispatchQueue(label: "SynchronizedInputStreamAccess") // NOT concurrent!!!
// This is called on Stream.Event.hasBytesAvailable
func handleInput() {
self.inputStreamAccessQueue.sync(flags: .barrier) {
guard let istr = self.inputStream, istr.hasBytesAvailable else {
log.error(self.buildLogMessage("handleInput() called when inputstream has no bytes available"))
return
}
let lengthbuffer = UnsafeMutablePointer<UInt8>.allocate(capacity: 4)
defer { lengthbuffer.deallocate(capacity: 4) }
let lenbytes_read = istr.read(lengthbuffer, maxLength: 4)
guard lenbytes_read == 4 else {
self.errorHandler(NetworkingError.InputError("Input Stream received \(lenbytes_read) (!=4) bytes"))
return
}
let bytes_expected = Int(UnsafeRawPointer(lengthbuffer).load(as: UInt32.self).bigEndian)
log.info(self.buildLogMessage("expect \(bytes_expected) bytes"))
let buffer = UnsafeMutablePointer<UInt8>.allocate(capacity: bytes_expected)
let bytes_read = istr.read(buffer, maxLength: bytes_expected)
guard bytes_read == bytes_expected else {
print("Error: Expected \(bytes_expected) bytes, read \(bytes_read)")
return
}
guard let message = String(bytesNoCopy: buffer, length: bytes_expected, encoding: .utf8, freeWhenDone: true) else {
log.error("ERROR WHEN READING")
return
}
self.handleMessage(message)
}
}
This works most of the time, but sometimes istr.read() does not read bytes_expected bytes; instead bytes_read < bytes_expected. This results in another hasBytesAvailable event, and handleInput() is called again. This time, of course, the first 4 bytes that are read do not contain the length of a new message but some content of the previous message. My code does not know that, so the first bytes are interpreted as the length. In many cases this is a very large value => allocating too much memory => crash.
I think this is the explanation for the bug. But how do I solve it?
Call read() on the stream while hasBytesAvailable = true? Is there maybe a better solution?
I would assume that when I loop, the hasBytesAvailable event would still fire after every read() => handleInput would still be called again too early. How can I avoid this?
EDIT 2: I have implemented the loop now; unfortunately it is still crashing with the same error (and probably for the same reason). Relevant code:
let bytes_expected = Int(UnsafeRawPointer(lengthbuffer).load(as: UInt32.self).bigEndian)
var message = ""
var bytes_missing = bytes_expected
while bytes_missing > 0 {
print("missing", bytes_missing)
let buffer = UnsafeMutablePointer<UInt8>.allocate(capacity: bytes_missing)
let bytes_read = istr.read(buffer, maxLength: bytes_missing)
guard bytes_read > 0 else {
print("bytes_read not <= 0: \(bytes_read)")
return
}
guard bytes_read <= bytes_missing else {
print("Read more bytes than expected. missing=\(bytes_missing), read=\(bytes_read)")
return
}
guard let partial_message = String(bytesNoCopy: buffer, length: bytes_expected, encoding: .utf8, freeWhenDone: true) else {
log.error("ERROR WHEN READING")
return
}
message = message + partial_message
bytes_missing -= bytes_read
}
My console output when it crashes:
missing 1952807028
malloc: *** mach_vm_map(size=1952808960) failed (error code=3)
*** error: can't allocate region
*** set a breakpoint in malloc_error_break to debug
So it seems that the whole handleInput() method is called too early, although I use the barrier! What am I doing wrong?
I'd do it like this (ready to be pasted into a playground):
import Foundation
var stream = InputStream(data: Data([0,1,0,0]))
stream.open()
defer { stream.close() }
var buffer = UnsafeMutablePointer<UInt8>.allocate(capacity: 4)
defer { buffer.deallocate(capacity: 4) }
guard stream.read(buffer, maxLength: 4) >= 4 else {
// handle all cases: end of stream, error, waiting for more data to arrive...
fatalError()
}
let number = UnsafeRawPointer(buffer).load(as: UInt32.self)
number // 256
number.littleEndian // 256
number.bigEndian // 65536
Using UnsafeRawPointer.load directly (without explicit rebinding) is safe for trivial types according to the documentation. Trivial types are generally those that don't require ARC operations.
Alternatively, you can access the same memory as a different type without rebinding through untyped memory access, so long as the bound type and the destination type are trivial types.
I would suggest load(as:) to convert the buffer to the UInt32, and I would make sure you make the endianness explicit, e.g.
let value = try stream.read(type: UInt32.self, endianness: .little)
Where:
enum InputStreamError: Error {
case readFailure
}
enum Endianness {
case little
case big
}
extension InputStream {
func read<T: FixedWidthInteger>(type: T.Type, endianness: Endianness = .little) throws -> T {
let size = MemoryLayout<T>.size
var buffer = [UInt8](repeating: 0, count: size)
let count = read(&buffer, maxLength: size)
guard count == size else {
throw InputStreamError.readFailure
}
return buffer.withUnsafeBytes { pointer -> T in
switch endianness {
case .little: return T(littleEndian: pointer.load(as: T.self))
case .big: return T(bigEndian: pointer.load(as: T.self))
}
}
}
func readFloat(endianness: Endianness) throws -> Float {
return try Float(bitPattern: read(type: UInt32.self, endianness: endianness))
}
func readDouble(endianness: Endianness) throws -> Double {
return try Double(bitPattern: read(type: UInt64.self, endianness: endianness))
}
}
Note, I made read(type:endianness:) a generic, so it can be reused with any of the standard integer types. I have also thrown in readFloat and readDouble for good measure.
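Putting the extension to work on the protocol from the question, a sketch of reading one length-prefixed message might look like this (hypothetical helper; it reuses InputStreamError from above and assumes a 4-byte big-endian length followed by a UTF-8 body):

// Read one length-prefixed message: a 4-byte big-endian length, then the body.
// read() may deliver fewer bytes than requested, so loop until complete.
func readMessage(from stream: InputStream) throws -> String {
    let length = Int(try stream.read(type: UInt32.self, endianness: .big))
    var body = Data()
    var chunk = [UInt8](repeating: 0, count: 4096)
    while body.count < length {
        let n = stream.read(&chunk, maxLength: min(chunk.count, length - body.count))
        guard n > 0 else { throw InputStreamError.readFailure }
        body.append(contentsOf: chunk[0..<n])
    }
    guard let message = String(data: body, encoding: .utf8) else {
        throw InputStreamError.readFailure
    }
    return message
}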

How to get total used memory

Can anyone help me get the total used memory of the iPhone? I am working on an app to find all the details of the iPhone. I have found 2-3 posts related to this question, but they are either in Obj-C or a different language/syntax. I have been working a lot to convert the Obj-C code to Swift but got stuck somewhere along the way.
I am trying Available memory for iPhone OS app
but everything looks different in Swift.
Also, the mach syntax is pretty difficult. Can anybody provide an example with some explanation of it? I can get all the memory information from here: https://github.com/Shmoopi/iOS-System-Services/blob/master/System%20Services/Utilities/SSMemoryInfo.m
But due to copyright issues I can't use someone else's work. Also, I don't understand anything from there.
Here you go. This will print out used memory in bytes.
var info = mach_task_basic_info()
var count = mach_msg_type_number_t(MemoryLayout<mach_task_basic_info>.size)/4
let kerr: kern_return_t = withUnsafeMutablePointer(to: &info) {
$0.withMemoryRebound(to: integer_t.self, capacity: 1) {
task_info(mach_task_self_,
task_flavor_t(MACH_TASK_BASIC_INFO),
$0,
&count)
}
}
if kerr == KERN_SUCCESS {
print("Memory in use (in bytes): \(info.resident_size)")
}
else {
print("Error with task_info(): " +
(String(cString: mach_error_string(kerr), encoding: String.Encoding.ascii) ?? "unknown error"))
}
@Bikram
You can use the function below to get the total amount of memory used by the system:
func getUsedMemory() {
var usedMemory: Int64 = 0
let hostPort: mach_port_t = mach_host_self()
var host_size: mach_msg_type_number_t = mach_msg_type_number_t(MemoryLayout<vm_statistics_data_t>.stride / MemoryLayout<integer_t>.stride)
var pagesize:vm_size_t = 0
host_page_size(hostPort, &pagesize)
var vmStat: vm_statistics = vm_statistics_data_t()
let status: kern_return_t = withUnsafeMutableBytes(of: &vmStat) {
let boundPtr = $0.baseAddress?.bindMemory(to: Int32.self, capacity: MemoryLayout.size(ofValue: vmStat) / MemoryLayout<Int32>.stride)
return host_statistics(hostPort, HOST_VM_INFO, boundPtr, &host_size)
}
// Now take a look at what we got and compare it against KERN_SUCCESS
if status == KERN_SUCCESS {
usedMemory = Int64(vmStat.active_count + vmStat.inactive_count + vmStat.wire_count) * Int64(pagesize)
print("Used memory (in bytes): \(usedMemory)")
}
else {
print("Failed to get virtual memory info")
}
}

Programmatically determining current process’ memory usage on iOS [duplicate]

I'm trying to retrieve the amount of memory my iPhone app is using at any time, programmatically. Yes, I'm aware of ObjectAlloc/Leaks. I'm not interested in those, only in whether it's possible to write some code that gets the number of bytes being used and reports it via NSLog.
Thanks.
To get the actual bytes of memory that your application is using, you can do something like the example below. However, you really should become familiar with the various profiling tools as well, as they are designed to give you a much better picture of overall usage.
#import <mach/mach.h>
// ...
void report_memory(void) {
struct task_basic_info info;
mach_msg_type_number_t size = TASK_BASIC_INFO_COUNT;
kern_return_t kerr = task_info(mach_task_self(),
TASK_BASIC_INFO,
(task_info_t)&info,
&size);
if( kerr == KERN_SUCCESS ) {
NSLog(@"Memory in use (in bytes): %lu", info.resident_size);
NSLog(@"Memory in use (in MiB): %f", ((CGFloat)info.resident_size / 1048576));
} else {
NSLog(@"Error with task_info(): %s", mach_error_string(kerr));
}
}
There is also a field in the structure, info.virtual_size, which will give you the number of bytes of virtual memory available (or memory allocated to your application as potential virtual memory, in any event). The code that pgb links to will give you the amount of memory available to the device and what type of memory it is.
This has been tested with Xcode 11 on Mojave 10.14.6 on 07/01/2019, and on Xcode 11.3 as of 11/05/2020.
All of the previous answers return the incorrect result.
Two Swift versions are below.
Here is how to get the expected value, using the approach written up by Apple's Quinn “The Eskimo!”.
This uses the phys_footprint var from Darwin > Mach > task_info and closely matches the value in the memory gauge in Xcode's Debug navigator.
The value returned is in bytes.
https://forums.developer.apple.com/thread/105088#357415
Original code follows.
func memoryFootprint() -> mach_vm_size_t? {
// The `TASK_VM_INFO_COUNT` and `TASK_VM_INFO_REV1_COUNT` macros are too
// complex for the Swift C importer, so we have to define them ourselves.
let TASK_VM_INFO_COUNT = mach_msg_type_number_t(MemoryLayout<task_vm_info_data_t>.size / MemoryLayout<integer_t>.size)
let TASK_VM_INFO_REV1_COUNT = mach_msg_type_number_t(MemoryLayout.offset(of: \task_vm_info_data_t.min_address)! / MemoryLayout<integer_t>.size)
var info = task_vm_info_data_t()
var count = TASK_VM_INFO_COUNT
let kr = withUnsafeMutablePointer(to: &info) { infoPtr in
infoPtr.withMemoryRebound(to: integer_t.self, capacity: Int(count)) { intPtr in
task_info(mach_task_self_, task_flavor_t(TASK_VM_INFO), intPtr, &count)
}
}
guard
kr == KERN_SUCCESS,
count >= TASK_VM_INFO_REV1_COUNT
else { return nil }
return info.phys_footprint
}
Modifying this slightly to create a class-level set of Swift methods allows easy return of the actual bytes and of formatted output in MB for display. I use this as part of an automated UITest suite to log the memory used before and after multiple iterations of the same test, to see if we have any potential leaks or allocations we need to look into.
// Created by Alex Zavatone on 8/1/19.
//
class Memory: NSObject {
// From Quinn the Eskimo at Apple.
// https://forums.developer.apple.com/thread/105088#357415
class func memoryFootprint() -> Float? {
// The `TASK_VM_INFO_COUNT` and `TASK_VM_INFO_REV1_COUNT` macros are too
// complex for the Swift C importer, so we have to define them ourselves.
let TASK_VM_INFO_COUNT = mach_msg_type_number_t(MemoryLayout<task_vm_info_data_t>.size / MemoryLayout<integer_t>.size)
let TASK_VM_INFO_REV1_COUNT = mach_msg_type_number_t(MemoryLayout.offset(of: \task_vm_info_data_t.min_address)! / MemoryLayout<integer_t>.size)
var info = task_vm_info_data_t()
var count = TASK_VM_INFO_COUNT
let kr = withUnsafeMutablePointer(to: &info) { infoPtr in
infoPtr.withMemoryRebound(to: integer_t.self, capacity: Int(count)) { intPtr in
task_info(mach_task_self_, task_flavor_t(TASK_VM_INFO), intPtr, &count)
}
}
guard
kr == KERN_SUCCESS,
count >= TASK_VM_INFO_REV1_COUNT
else { return nil }
let usedBytes = Float(info.phys_footprint)
return usedBytes
}
class func formattedMemoryFootprint() -> String
{
let usedBytes: UInt64? = UInt64(self.memoryFootprint() ?? 0)
let usedMB = Double(usedBytes ?? 0) / 1024 / 1024
let usedMBAsString: String = "\(usedMB)MB"
return usedMBAsString
}
}
Enjoy!
Note: an enterprising coder may want to add a static formatter to the class so that usedMBAsString only returns 2 significant decimal places.
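A minimal sketch of that idea, reworking formattedMemoryFootprint() around a shared NumberFormatter (hypothetical, not part of the original answer):

// Hypothetical: a shared formatter so the MB value is limited to 2 decimal places.
static let mbFormatter: NumberFormatter = {
    let formatter = NumberFormatter()
    formatter.minimumFractionDigits = 2
    formatter.maximumFractionDigits = 2
    return formatter
}()

class func formattedMemoryFootprint() -> String {
    let usedMB = Double(memoryFootprint() ?? 0) / 1024 / 1024
    let formatted = mbFormatter.string(from: NSNumber(value: usedMB)) ?? "\(usedMB)"
    return "\(formatted)MB"
}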
The headers for TASK_BASIC_INFO say:
/* Don't use this, use MACH_TASK_BASIC_INFO instead */
Here is a version using MACH_TASK_BASIC_INFO:
void report_memory(void)
{
struct mach_task_basic_info info;
mach_msg_type_number_t size = MACH_TASK_BASIC_INFO_COUNT;
kern_return_t kerr = task_info(mach_task_self(),
MACH_TASK_BASIC_INFO,
(task_info_t)&info,
&size);
if( kerr == KERN_SUCCESS ) {
NSLog(@"Memory in use (in bytes): %llu", info.resident_size);
} else {
NSLog(@"Error with task_info(): %s", mach_error_string(kerr));
}
}
Here is report_memory() enhanced to rapidly show leak status in the NSLog().
void report_memory(void) {
static unsigned last_resident_size=0;
static unsigned greatest = 0;
static unsigned last_greatest = 0;
struct task_basic_info info;
mach_msg_type_number_t size = sizeof(info);
kern_return_t kerr = task_info(mach_task_self(),
TASK_BASIC_INFO,
(task_info_t)&info,
&size);
if( kerr == KERN_SUCCESS ) {
int diff = (int)info.resident_size - (int)last_resident_size;
unsigned latest = info.resident_size;
if( latest > greatest ) greatest = latest; // track greatest mem usage
int greatest_diff = greatest - last_greatest;
int latest_greatest_diff = latest - greatest;
NSLog(@"Mem: %10u (%10d) : %10d : greatest: %10u (%d)", info.resident_size, diff,
latest_greatest_diff,
greatest, greatest_diff );
} else {
NSLog(@"Error with task_info(): %s", mach_error_string(kerr));
}
last_resident_size = info.resident_size;
last_greatest = greatest;
}
Here is a Swift version of Jason Coco's answer:
func reportMemory() {
let name = mach_task_self_
let flavor = task_flavor_t(TASK_BASIC_INFO)
let basicInfo = task_basic_info()
var size: mach_msg_type_number_t = mach_msg_type_number_t(sizeofValue(basicInfo))
let pointerOfBasicInfo = UnsafeMutablePointer<task_basic_info>.alloc(1)
let kerr: kern_return_t = task_info(name, flavor, UnsafeMutablePointer(pointerOfBasicInfo), &size)
let info = pointerOfBasicInfo.move()
pointerOfBasicInfo.dealloc(1)
if kerr == KERN_SUCCESS {
print("Memory in use (in bytes): \(info.resident_size)")
} else {
print("error with task_info(): \(String(cString: mach_error_string(kerr)))")
}
}
Swift 3.1 (As of August 8, 2017)
func getMemory() {
var taskInfo = mach_task_basic_info()
var count = mach_msg_type_number_t(MemoryLayout<mach_task_basic_info>.size)/4
let kerr: kern_return_t = withUnsafeMutablePointer(to: &taskInfo) {
$0.withMemoryRebound(to: integer_t.self, capacity: 1) {
task_info(mach_task_self_, task_flavor_t(MACH_TASK_BASIC_INFO), $0, &count)
}
}
if kerr == KERN_SUCCESS {
let usedMegabytes = taskInfo.resident_size/(1024*1024)
print("used megabytes: \(usedMegabytes)")
} else {
print("Error with task_info(): " +
(String(cString: mach_error_string(kerr), encoding: String.Encoding.ascii) ?? "unknown error"))
}
}
Here's a Swift 3 Version:
func mach_task_self() -> task_t {
return mach_task_self_
}
func getMegabytesUsed() -> Float? {
var info = mach_task_basic_info()
var count = mach_msg_type_number_t(MemoryLayout.size(ofValue: info) / MemoryLayout<integer_t>.size)
let kerr = withUnsafeMutablePointer(to: &info) { infoPtr in
return infoPtr.withMemoryRebound(to: integer_t.self, capacity: Int(count)) { (machPtr: UnsafeMutablePointer<integer_t>) in
return task_info(
mach_task_self(),
task_flavor_t(MACH_TASK_BASIC_INFO),
machPtr,
&count
)
}
}
guard kerr == KERN_SUCCESS else {
return nil
}
return Float(info.resident_size) / (1024 * 1024)
}
Objective-C version:
size_t memoryFootprint()
{
task_vm_info_data_t vmInfo;
mach_msg_type_number_t count = TASK_VM_INFO_COUNT;
kern_return_t result = task_info(mach_task_self(), TASK_VM_INFO, (task_info_t) &vmInfo, &count);
if (result != KERN_SUCCESS)
return 0;
return (size_t)vmInfo.phys_footprint;
}
