Calculate factorial of a decimal (i.e. Gamma function) on iOS - ios

I need to calculate the factorial of a decimal number, say 6.4, on iOS. I tried
double myFactorial = gamma(6.4);
But get the error "'gamma is unavailable': not avaiable on iOS". Is there a way to add the gamma function into iOS?

Have you tried:
tgamma(6.4)
I see it working in my code.
There's also:
double tgamma (double x)
float tgammaf (float x)
long double tgammal (long double x)

you can try like this logic may be this will well you.
- (int)factorial:(int)operand
{
if`enter code here`(operand < 0)
return -1;
else if (operand > 1)
return operand * [self factorial:operand - 1];
else
return 1;
}
and then
- (double)factorial:(double)operand
{
double output = operand;
if (output == 0) output = 1; // factorial of 0 is 1
else if (output < 0) output = NAN;
else if (output > 0)
{
if (fmod(output, floor(output)) == 0) // integer
output = round(exp(lgamma(output + 1)));
else // natural number
output = exp(lgamma(output + 1));
}
return output;
}
- (double)n:(double)n chooseR:(double)r
{
return round(exp((lgamma(n+1)) - (lgamma(r+1) + lgamma(n-r+1))));
}
- (double)n:(double)n pickR:(double)r
{
return round(exp(lgamma(n+1) - lgamma(n-r+1)));
}

Related

Network Delay Problem - Complexity Analysis

Below is a solution Network delay problem of leetcode. I have written a all test case success solution. But not able to analyse the time complexity. I believe its O(V^2 + E) where V is the number of nodes and E edges.
In this solution though I am adding all adjacents of each node every time, but not processing them further if there exists a min distance for that node already.
Leetcode question link https://leetcode.com/problems/network-delay-time
public int networkDelayTime(int[][] times, int n, int k) {
int[] distances = new int[n+1];
Arrays.fill(distances , -1);
if(n > 0){
List<List<int[]>> edges = new ArrayList<List<int[]>>();
for(int i = 0 ; i <= n ; i++){
edges.add(new ArrayList<int[]>());
}
for(int[] time : times){
edges.get(time[0]).add(new int[]{time[1] , time[2]});
}
Queue<Vertex> queue = new LinkedList<>();
queue.add(new Vertex(k , 0));
while(!queue.isEmpty()){
Vertex cx = queue.poll();
int index = cx.index;
int distance = cx.distance;
//process adjacents only if distance is updated
if(distances[index] == -1 || distances[index] > distance){
distances[index] = distance;
List<int[]> adjacents = edges.get(index);
for(int[] adjacent : adjacents){
queue.add(new Vertex(adjacent[0] , adjacent[1]+distance));
}
}
}
}
int sum = 0;
for(int i = 1 ; i <= n; i++){
int distance = distances[i];
if(distance == -1){
return -1;
}
sum = Math.max(sum , distance);
}
return sum;
}
public static class Vertex{
int index;
int distance;
public Vertex(int i , int d){
index = i;
distance = d;
}
}
You should use PriorityQueue instead of LinkedList

There's something wrong in converting decimal to binary

int main(){
int input;
int bin = 0, i = 1;
print("Please input a number");
input = num.parse(stdin.readLineSync());
while(input > 0)
{
bin = bin + (input % 2)*i;
input = input/2;
i = i * 10;
}
return 0;
}
It returned infinite numbers.
You just need to take care of double to int conversion: input = (input/2).floor()
See this working code:
void main() {
int input;
int bin = 0, i = 1;
input = 5;
while(input > 0)
{
bin = bin + (input % 2)*i;
input = (input/2).floor();
i = i * 10;
}
print(bin);
}
Here is a version of the above function that:
uses the integer division
with a ternary conditional operator, avoids the conversion to string.
sets the most significant bit to the left (bin = (dec % 2) + bin; which is what most people expects, but is not what the original snippet did)
String dec2bin(int dec) {
var bin = '';
while (dec > 0) {
bin = (dec % 2 == 0 ? '0' : '1') + bin;
dec ~/= 2;
}
return bin;
}
P.S: But, of course, one can simply write:
var bin = num.toRadixString(2);
There is no real need to write your own dec2bin function.
As the int result can become easily big in length and the max int value in dart is 2e53 (and much less if you compile to web). it's better to change the approach and return as a String.
String dec2bin(int decimal) {
String bin = '';
while (decimal > 0) {
bin = bin + (decimal % 2).toString();
decimal = (decimal / 2).floor();
}
return bin;
}
print(dec2bin(132070242815));
Result: 1111111110011111111111111111110101111
//number is always in int
static String decimalToBinary(int number) {
return number.toRadixString(2);
}
//binary is always in string
static int binaryToDecimal(String binary) {
return int.parse(binary, radix: 2);
}

Testcase failed after converting codes from Objective-C to Swift

I am doing some bitwise operations in Swift style, which these codes are originally written in Objective-C/C. I use UnsafeMutablePointer to state the beginning index of memory address and use UnsafeMutableBufferPointer for accessing the element within the scope.
You can access the original Objective-C file Here.
public init(size: Int) {
self.size = size
self.bitsLength = (size + 31) / 32
self.startIdx = UnsafeMutablePointer<Int32>.alloc(bitsLength * sizeof(Int32))
self.bits = UnsafeMutableBufferPointer(start: startIdx, count: bitsLength)
}
/**
* #param from first bit to check
* #return index of first bit that is set, starting from the given index, or size if none are set
* at or beyond its given index
*/
public func nextSet(from: Int) -> Int {
if from >= size { return size }
var bitsOffset = from / 32
var currentBits: Int32 = bits[bitsOffset]
currentBits &= ~((1 << (from & 0x1F)) - 1).to32
while currentBits == 0 {
if ++bitsOffset == bitsLength {
return size
}
currentBits = bits[bitsOffset]
}
let result: Int = bitsOffset * 32 + numberOfTrailingZeros(currentBits).toInt
return result > size ? size : result
}
func numberOfTrailingZeros(i: Int32) -> Int {
var i = i
guard i != 0 else { return 32 }
var n = 31
var y: Int32
y = i << 16
if y != 0 { n = n - 16; i = y }
y = i << 8
if y != 0 { n = n - 8; i = y }
y = i << 4
if y != 0 { n = n - 4; i = y }
y = i << 2
if y != 0 { n = n - 2; i = y }
return n - Int((UInt((i << 1)) >> 31))
}
Testcase:
func testGetNextSet1() {
// Passed
var bits = BitArray(size: 32)
for i in 0..<bits.size {
XCTAssertEqual(32, bits.nextSet(i), "\(i)")
}
// Failed
bits = BitArray(size: 34)
for i in 0..<bits.size {
XCTAssertEqual(34, bits.nextSet(i), "\(i)")
}
}
Can someone guide me why the second testcase fail but the objective-c version pass ?
Edit: As #vacawama mentioned: If you break testGetNextSet into 2 tests, both pass.
Edit2: When I run tests with xctool, and tests which calling BitArray's nextSet() will crash while running.
Objective-C version of numberOfTrailingZeros:
// Ported from OpenJDK Integer.numberOfTrailingZeros implementation
- (int32_t)numberOfTrailingZeros:(int32_t)i {
int32_t y;
if (i == 0) return 32;
int32_t n = 31;
y = i <<16; if (y != 0) { n = n -16; i = y; }
y = i << 8; if (y != 0) { n = n - 8; i = y; }
y = i << 4; if (y != 0) { n = n - 4; i = y; }
y = i << 2; if (y != 0) { n = n - 2; i = y; }
return n - (int32_t)((uint32_t)(i << 1) >> 31);
}
When translating numberOfTrailingZeros, you changed the return value from Int32 to Int. That is fine, but the last line of the function is not operating properly as you translated it.
In numberOfTrailingZeros, replace this:
return n - Int((UInt((i << 1)) >> 31))
With this:
return n - Int(UInt32(bitPattern: i << 1) >> 31)
The cast to UInt32 removes all but the lower 32 bits. Since you were casting to UInt, you weren't removing those bits. It is necessary to use bitPattern to make this happen.
Finally I found out that startIdx just need to be initialized after allocation.
self.startIdx = UnsafeMutablePointer<Int32>.alloc(bitsLength * sizeof(Int32))
self.startIdx.initializeFrom(Array(count: bitsLength, repeatedValue: 0))
Or use calloc with just one line code:
self.startIdx = unsafeBitCast(calloc(bitsLength, sizeof(Int32)), UnsafeMutablePointer<Int32>.self)
Furthermore, I use lazy var to defer the Initialization of UnsafeMutableBufferPointer until the property is first used.
lazy var bits: UnsafeMutableBufferPointer<Int32> = {
return UnsafeMutableBufferPointer<Int32>(start: self.startIdx, count: self.bitsLength)
}()
On the other hand, don't forget to deinit:
deinit {
startIdx.destroy()
startIdx.dealloc(bitsLength * sizeof(Int32))
}

iOS : round off to nearest number

Lets say I've following NSInteger's :
111
246
99
82
92
85
Is there a function which converts (round off) like these numbers like :
110
250
100
80
90
85
Looking at your requested result, numbers less than 100 are rounded to the nearest 5, and those over rounded to the nearest 10. By doing this, you can get the desired result
- (NSInteger)roundToNearest:(NSInteger)inputNum
{
if (inputNum < 100){
return roundf(inputNum / 5.0f) * 5;
}
else {
return roundf(inputNum / 10.0f) * 10;
}
}
// This Generic function can solve the purpose you can modify it according to your requirement.
- (NSInteger)roundOffNumberToNearestCompleteNo:(NSInteger)number
{
if(number <= 0)
return 0;
NSString *strNumber = [NSString stringWithFormat:#"%ld",number];
NSInteger digitCount = [strNumber length];
if(digitCount == 1)
{
if(number <= 5)
{
return 1;
}
else
{
return 10;
}
}
NSString *strNumberUnit = #"1";
for(int i = 1; i < digitCount; i++)
{
strNumberUnit = [strNumberUnit stringByAppendingString:#"0"];
}
NSInteger numberUnit = [strNumberUnit integerValue];
NSInteger reminder = number%numberUnit;
if(reminder <= (numberUnit/2))
{
number = number - reminder;
}
else
{
number = number + (numberUnit - reminder);
}
return number;
}
You should use
- (NSInteger)roundFive:(NSInteger)number
{
return roundf(number/5.)*5;
}
Why not use a more generic and thus reusable method? This seems to work for all valid NSIntegers including negative numbers and checks for div0 but someone let me know if they spot a flaw:
- (NSInteger)roundInteger:(NSInteger)num toNearestInteger:(NSInteger)roundTo
{
if (!roundTo) { return 0; }
return (roundTo * round((float)num / roundTo));
}
Probably easier to implement it yourself:
- (NSInteger)roundOff:(NSInteger)number
{
NSInteger remainder = number % 5;
NSInteger quotient = number / 5;
if (remainder > 0) {
return (quotient + 1) * 5;
}
else {
return quotient * 5;
}
}

How to print all possible solutions for Longest Common subsequence

I want to print all the possible solutions to LCS problem.
The two strings abcbdab and bdcaba should print following 3 strings:
bdab,bcba,bcab.
C is the global matrix table which takes values according to algorithm and m, n are the length of the sequences a, b.
But The output is something unexpected.
#include<stdio.h>
#include<conio.h>
int co=0,m=0,n=0,c[10][10];
char a[10],b[10];
void main()
{
int i,j;
clrscr();
printf("Enter Two strings: ");
scanf("%s",a);
scanf("%s",b);
m=strlen(a);
n=strlen(b);
for(i=0;i<=m;i++)
{
for(j=0;j<=n;j++)
{ if(i==0 || j==0)
{
c[i][j]=0;
}
else if(a[i-1]==b[j-1])
{
c[i][j]=c[i-1][j-1]+1;
}
else if(c[i-1][j]>=c[i][j-1])
{
c[i][j]=c[i-1][j];
}
else
{
c[i][j]=c[i][j-1];
}
}
}
for(i=0;i<=m;i++)
{
for(j=0;j<=n;j++)
{
printf("%d\t",c[i][j]);
}
printf("\n");
}
print(m,n);
getch();
}
print(int i,int j)
{
if(i==0 || j==0)
return 0;
else if(a[i-1]==b[j-1])
{
print(i-1,j-1);
if(co==c[m][n])
{
co=0;
printf("\n");
}
printf("%c",a[i-1]);
co++;
}
else if(c[i-1][j]==c[i][j-1])
{
print(i-1,j);
print(i,j-1);
}
else if(c[i][j-1]>=c[i-1][j])
print(i,j-1);
else
print(i-1,j);
return;
}
Here you can find a recursive approach of how to do this: Reading out all LCSs
Here is my code for this approach in Java:
private Set<String> lcs(int[][] dp, String fst, String snd, int i, int j) {
Set<String> lcss = new HashSet<>();
if (i == 0 || j == 0) {
lcss.add("");
} else if (fst.charAt(i - 1) == snd.charAt(j - 1)) {
for (String lcs : lcs(dp, fst, snd, i - 1, j - 1)) {
lcss.add(lcs + fst.charAt(i - 1));
}
} else {
if (dp[i - 1][j] >= dp[i][j - 1]) {
lcss.addAll(lcs(dp, fst, snd, i - 1, j));
}
if (dp[i][j - 1] >= dp[i - 1][j]) {
lcss.addAll(lcs(dp, fst, snd, i, j - 1));
}
}
return lcss;
}
Here is the Java code with comments explaining how to print all possible lcs.
import java.util.HashSet;
import java.util.Scanner;
import java.util.Set;
public class LongestCommonSubsequence {
public static int[][] LCSmatrix(String X, String Y) {
//we ignore the top most row and left most column in this matrix
//so we add 1 and create a matrix with appropriate row and column size
int m = X.length() + 1, n = Y.length() + 1;
int[][] c = new int[m][n];
for (int i = 1; i < m; i++) {
for (int j = 1; j < n; j++) {
//since we added 1 to row size and column size,
// we substract 1 from i,j to find the char at that index
if (X.charAt(i - 1) == Y.charAt(j - 1)) {
c[i][j] = c[i - 1][j - 1] + 1;
} else if (c[i - 1][j] >= c[i][j - 1]) {
c[i][j] = c[i - 1][j];
} else {
c[i][j] = c[i][j - 1];
}
}
}
printMatrix(c);
return c;
}
public static void printMatrix(int[][] grid) {
for (int r = 0; r < grid.length; r++) {
for (int c = 0; c < grid[r].length; c++) {
System.out.print(grid[r][c] + " ");
}
System.out.println();
}
}
public static void allLCS(int[][] c, String X, String Y, int i, int j, Set<String> setLCS, String s) {
//return when either of the string length is 0
if (i == 0 || j == 0) {
setLCS.add(s);
return;
}
//if last characters are equal, they belong in lcs
if (X.charAt(i - 1) == Y.charAt(j - 1)) {
//prepend the char to lcs since, we are going backwards
s = X.charAt(i - 1) + s;
//continue finding lcs in substrings X.substring(0,i-1) and Y.substring(0,j-1)
allLCS(c, X, Y, i - 1, j - 1, setLCS, s);
} // if there is a tie in matrix cells, we backtrack in both ways,
// else one way, which ever is greater
else if (c[i - 1][j] == c[i][j - 1]) {
//continue finding lcs in substring X.substring(0,i-1)
allLCS(c, X, Y, i - 1, j, setLCS, s);
//continue finding lcs in substring Y.substring(0,j-1)
allLCS(c, X, Y, i, j - 1, setLCS, s);
} else if (c[i - 1][j] > c[i][j - 1]) {
allLCS(c, X, Y, i - 1, j, setLCS, s);
} else {
allLCS(c, X, Y, i, j - 1, setLCS, s);
}
}
public static void main(String[] args) {
Scanner sc = new Scanner(System.in);
System.out.println(" Enter String X and Y : ");
String X = sc.next();
String Y = sc.next();
sc.close();
Set<String> set = new HashSet<String>();
allLCS(LCSmatrix(X, Y), X, Y, X.length(), Y.length(), set, "");
System.out.println(set.toString());
}
}
class Solution
{
public int function1(String s,String t,int n,int m,int dp[][]){
if(n==0 || m==0){
return 0;
}
if(dp[n][m]!=-1){
return dp[n][m];
}
if(s.charAt(n-1)==t.charAt(m-1)){
return dp[n][m]=1+function1(s,t,n-1,m-1,dp);
}
return dp[n][m]=Math.max(function1(s,t,n-1,m,dp),function1(s,t,n,m-1,dp));
}
public HashSet<String> function2(String s,String t,int n,int m,int dp[][],HashMap<String,HashSet<String>> map){
HashSet<String> temp=new HashSet<String>();
String key=n+"-"+m;
if(n==0 || m==0){
temp.add("");
return temp;
}
if(map.containsKey(key)){
return map.get(key);
}
if(s.charAt(n-1)==t.charAt(m-1)){
for(String tempstr:function2(s,t,n-1,m-1,dp,map)){
temp.add(tempstr+s.substring(n-1,n));
}
}
else{
if(dp[n-1][m]>=dp[n][m-1]){
temp.addAll(function2(s,t,n-1,m,dp,map));
}
if(dp[n-1][m]<=dp[n][m-1]){
temp.addAll(function2(s,t,n,m-1,dp,map));
}
}
map.put(key,temp);
return temp;
}
public List<String> all_longest_common_subsequences(String s, String t)
{
int n=s.length();
int m=t.length();
int dp[][]=new int[n+1][m+1];
for(int i=0;i<=n;i++){
for(int j=0;j<=m;j++){
dp[i][j]=-1;
}
}
function1(s,t,n,m,dp);
HashMap<String,HashSet<String>> map=new HashMap<String,HashSet<String>>();
ArrayList<String> ans=new ArrayList<String>(function2(s,t,n,m,dp,map));
Collections.sort(ans);
return ans;
}
}
Your source code is not printing the lcs. It is actually calculating the length of lcs. Source code given by you is totally wrong. First try to print one lcs. Then extend that solution to print all the lcs. For your help given below is working java solution.
static int arr[][];
static void lcs(String s1, String s2) {
for (int i = 1; i <= s1.length(); i++) {
for (int j = 1; j <= s2.length(); j++) {
if (s1.charAt(i - 1) == s2.charAt(j - 1))
arr[i][j] = arr[i - 1][j - 1] + 1;
else
arr[i][j] = Math.max(arr[i - 1][j], arr[i][j - 1]);
}
}
}
static Set<String> lcs(String s1, String s2, int len1, int len2) {
if (len1 == 0 || len2 == 0) {
Set<String> set = new HashSet<String>();
set.add("");
return set;
}
if (s1.charAt(len1 - 1) == s2.charAt(len2 - 1)) {
Set<String> set = lcs(s1, s2, len1 - 1, len2 - 1);
Set<String> set1 = new HashSet<>();
for (String temp : set) {
temp = temp + s1.charAt(len1 - 1);
set1.add(temp);
}
return set1;
} else {
Set<String> set = new HashSet<>();
Set<String> set1 = new HashSet<>();
if (arr[len1 - 1][len2] >= arr[len1][len2 - 1]) {
set = lcs(s1, s2, len1 - 1, len2);
}
if (arr[len1][len2 - 1] >= arr[len1 - 1][len2]) {
set1 = lcs(s1, s2, len1, len2 - 1);
}
for (String temp : set) {
set1.add(temp);
}
//System.out.println("In lcs" + set1);
return set1;
}
}
public static void main(String[] args) {
String s1 = "abcbdab";
String s2 = "bdcaba ";
arr = new int[s1.length() + 1][s2.length() + 1];
lcs(s1, s2);
System.out.println(lcs(s1, s2, s1.length(), s2.length()));
}
If last character of strings are equal then they must be in lcs. If they are not equal lcs will be either constructed from upper side of matrix or left side of matrix depending upon which value is greater. If both the value is equal then lcs will be constructed from both the side. So keep constructing the lcs until you have constructed all the lcs and store them in a set.

Resources