Z3 vZ - Adding constraint improves optimum

I'm new to using Z3 and am trying to model an ILP, which I have already done successfully with the MILP solver PuLP. I have now implemented the same objective function (which is to be minimized) and the same constraints in Z3 and am experiencing strange behaviour, namely that adding a constraint decreases the minimum.
My question is: How can that be? Is it a bug or can it be explained somehow?
More detail, in case needed:
I'm trying to solve a Teacher Assignment Problem. Courses are scheduled a year ahead and the teachers get the list of the courses. They can then select which courses they want to teach, with which priority they want to teach them, how many workdays they desire to teach (each course lasts several days), and a max and min number of courses they definitely want to teach. The program gets as input a list of possible teacher-assignments. A teacher-assignment is a tuple consisting of:
teacher-name
event-id
priority of teacher towards event
the distance between teacher and course
The goal of the program is to find a combination of assignments that minimizes:
the average relative deviation 'desired workdays <-> assigned workdays' of all teachers
the maximum relative deviation 'desired workdays <-> assigned workdays' of any teacher
the overall distance of courses to assigned teachers
the sum of priorities (higher priority means less willingness to teach)
Main Constraints:
the number of teachers assigned to a course must match the needed number of teachers
the number of courses assigned to a teacher must be within the specified min/max range
the courses to which a teacher is assigned may not overlap in time (a list of overlap sets is given)
To track the average relative deviation and the maximum relative deviation of workdays, two more 'helper constraints' are introduced (the usual linearization of the absolute deviation |assigned - desired|, which relies on the objective driving at least one of the two deltas to 0):
for each teacher: overload (delta_plus) - underload (delta_minus) = assigned workdays - desired workdays
for each teacher: (delta_plus + delta_minus) / desired workdays <= max relative deviation (DELTA)
Here is the Python code:
from z3 import *

def compute_optimum(a1, a2, a3, a4, worst_case_distance=0):
    """
    Find the optimum solution with weights a1, a2, a3, a4
    (average workday deviation, maximum workday deviation, cumulative linear distance, sum of priority-2 assignments).
    Higher weight = more optimized (value minimized).
    Returns all assignment tuples which occur in the calculated optimal model.
    """
    print("Powered by Z3")
    print(f"\n\n\n\n ------- FINDING OPTIMUM TO WEIGHTS: a1={a1}, a2={a2}, a3={a3}, a4={a4} -------\n")
    # key: assignment tuple, value: z3 Bool
    x = {assignment : Bool('e%i_%s' % (assignment[1], assignment[0])) for assignment in possible_assignments}
    delta_plus = {teacher : Int('d+_%s' % teacher) for teacher in teachers}
    delta_minus = {teacher : Int('d-_%s' % teacher) for teacher in teachers}
    DELTA = Real('DELTA')
    opt = Optimize()
    # constraint1: number of teachers needed per event
    num_available_per_event = {event : len(list(filter(lambda assignment: assignment[1] == event, possible_assignments))) for event in events}
    for event in events:
        num_teachers_to_assign = min(event_size[event], num_available_per_event[event])
        opt.add(Sum( [If(x[assignment], 1, 0) for assignment in x.keys() if assignment[1] == event] ) == num_teachers_to_assign)
    for teacher in teachers:
        # constraint2: max and min number of events for each teacher
        max_events = len(events)
        min_events = 0
        num_assigned_events = Sum( [If(x[assignment], 1, 0) for assignment in x.keys() if assignment[0] == teacher] )
        opt.add(num_assigned_events >= min_events, num_assigned_events <= max_events)
        # constraint3: teacher can't work in multiple overlapping events
        for overlapping_events in event_overlap_sets:
            opt.add(Sum( [If(x[assignment], 1, 0) for assignment in x.keys() if assignment[1] in overlapping_events and assignment[0] == teacher] ) <= 1)
        # constraint4: delta (absolute over- and underload of teacher)
        num_teacher_workdays = Sum( [If(x[assignment], event_durations[assignment[1]], 0) for assignment in x.keys() if assignment[0] == teacher])
        opt.add(delta_plus[teacher] >= 0, delta_minus[teacher] >= 0)
        opt.add(delta_plus[teacher] - delta_minus[teacher] == num_teacher_workdays - desired_workdays[teacher])
        # constraint5: DELTA (maximum relative deviation of desired vs. assigned workdays)
        opt.add(DELTA >= ToReal(delta_plus[teacher] + delta_minus[teacher]) / desired_workdays[teacher])
    #opt.add(DELTA <= 1) # adding this results in a better optimum
    average_rel_workday_deviation = Sum( [ToReal(delta_plus[teacher] + delta_minus[teacher]) / desired_workdays[teacher] for teacher in teachers]) / len(teachers)
    overall_distance = Sum( [If(x[assignment], assignment[3], 0) for assignment in x.keys()])
    num_prio2 = Sum( [If(x[assignment], assignment[2]-1, 0) for assignment in x.keys()])
    obj_fun = opt.minimize(
        a1 * average_rel_workday_deviation
        + a2 * DELTA
        + a3 * overall_distance
        + a4 * num_prio2
    )
    #print(opt)
    if opt.check() == sat:
        m = opt.model()
        optimal_assignments = []
        for assignment in x.keys():
            if m.evaluate(x[assignment]):
                optimal_assignments.append(assignment)
        for teacher in teachers:
            print(f"{teacher}: d+ {m.evaluate(delta_plus[teacher])}, d- {m.evaluate(delta_minus[teacher])}")
        #print(m)
        print("DELTA:::", m.evaluate(DELTA))
        print("min value:", obj_fun.value().as_decimal(2))
        return optimal_assignments
    else:
        print("Not satisfiable")
        return []

compute_optimum(1,1,1,1)
Sample input:
teachers = ['fr', 'hö', 'pf', 'bo', 'jö', 'sti', 'bi', 'la', 'he', 'kl', 'sc', 'str', 'ko', 'ba']
events = [5, 6, 7, 8, 9, 10, 11, 12]
event_overlap_sets = [{5, 6}, {8, 9}, {10, 11}, {11, 12}, {12, 13}]
desired_workdays = {'fr': 36, 'hö': 50, 'pf': 30, 'bo': 100, 'jö': 80, 'sti': 56, 'bi': 20, 'la': 140, 'he': 5.0, 'kl': 50, 'sc': 38, 'str': 42, 'ko': 20, 'ba': 20}
event_size = {5: 2, 6: 2, 7: 2, 8: 3, 9: 2, 10: 2, 11: 3, 12: 2}
event_durations = {5: 5.0, 6: 5.0, 7: 5.0, 8: 16, 9: 7.0, 10: 5.0, 11: 16, 12: 5.0}
# assignment: (teacher, event, priority, distance)
possible_assignments = [('he', 5, 1, 11), ('sc', 5, 1, 48), ('str', 5, 1, 199), ('ko', 6, 1, 53), ('jö', 7, 1, 317), ('bo', 9, 1, 56), ('sc', 10, 1, 25), ('ba', 11, 1, 224), ('bo', 11, 1, 312), ('jö', 11, 1, 252), ('kl', 11, 1, 248), ('la', 11, 1, 303), ('pf', 11, 1, 273), ('str', 11, 1, 228), ('kl', 5, 2, 103), ('la', 5, 2, 16), ('pf', 5, 2, 48), ('bi', 6, 2, 179), ('la', 6, 2, 16), ('pf', 6, 2, 48), ('sc', 6, 2, 48), ('str', 6, 2, 199), ('sc', 7, 2, 354), ('sti', 7, 2, 314), ('bo', 8, 2, 298), ('fr', 8, 2, 375), ('hö', 9, 2, 95), ('jö', 9, 2, 119), ('sc', 9, 2, 37), ('sti', 9, 2, 95), ('bi', 10, 2, 211), ('hö', 11, 2, 273), ('bi', 12, 2, 408), ('bo', 12, 2, 318), ('ko', 12, 2, 295), ('la', 12, 2, 305), ('sc', 12, 2, 339), ('str', 12, 2, 218)]
Output (just the delta+ and delta-):
------- FINDING OPTIMUM TO WEIGHTS: a1=1, a2=1, a3=1, a4=1 -------
fr: d+ 17, d- 37
hö: d+ 26, d- 69
pf: d+ 0, d- 25
bo: d+ 41, d- 120
jö: d+ 0, d- 59
sti: d+ 27, d- 71
bi: d+ 0, d- 15
la: d+ 0, d- 119
he: d+ 0, d- 0
kl: d+ 0, d- 50
sc: d+ 0, d- 33
str: d+ 0, d- 32
ko: d+ 0, d- 20
ba: d+ 10, d- 14
DELTA::: 19/10
min value: 3331.95?
What I observe that does not make sense to me:
often, neither delta_plus nor delta_minus of a teacher equals 0, and DELTA is bigger than 1
adding the constraint 'DELTA <= 1' results in a smaller objective value, faster computation, and observation 1 no longer occurs
Also: the computation takes forever (although that is not the point here)
I am happy for any sort of help!
Edit:
As suggested by alias, changing the delta+/- variables to Real and removing the two ToReal() statements yields the desired result.
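For reference, the fix amounts to these changed lines (a sketch: the delta variables become Real and the two ToReal() wrappers are dropped; everything else stays as above):

delta_plus = {teacher : Real('d+_%s' % teacher) for teacher in teachers}
delta_minus = {teacher : Real('d-_%s' % teacher) for teacher in teachers}
...
opt.add(DELTA >= (delta_plus[teacher] + delta_minus[teacher]) / desired_workdays[teacher])
...
average_rel_workday_deviation = Sum( [(delta_plus[teacher] + delta_minus[teacher]) / desired_workdays[teacher] for teacher in teachers]) / len(teachers)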
If you look at the generated expressions for my sample input, there are in fact slight differences (besides the different datatype and the missing to_real statements). For example, take the constraint which is supposed to ensure that delta_plus - delta_minus of 'fr' equals 16 - 36 if he works event 8, and 0 - 36 if he doesn't.
My old code using integers and ToReal-conversions produces this expression:
(assert (= (- d+_fr d-_fr) (- (+ (ite e8_fr 16 0)) 36)))
The code using Reals and no type-conversions produces this:
(assert (let ((a!1 (to_real (- (+ (ite e8_fr 16 0)) 36))))
(= (- d+_fr d-_fr) a!1)))
Also the minimization expressions are slightly different:
My old code using integers and ToReal-conversions produces this expression:
(minimize (let (
(a!1 ...)
(a!2 (...))
(a!3 (...))
)
(+ (* 1.0 (/ a!1 14.0)) (* 1.0 DELTA) a!2 a!3)))
The code using Reals and no type-conversions produces this:
(minimize (let (
(a!1 (/ ... 14.0))
(a!2 (...))
(a!3 (...))
)
(+ (* 1.0 a!1) (* 1.0 DELTA) a!2 a!3)))
Sadly, I don't really know how to read this, but it looks much the same to me.

Related

How can I do caret training with cross-validation on predefined (grouped) splits of the training data?

I would like to train an ML model in caret based on training data with the following structure:
df <- data.frame(
  Label      = c("A","A","A","B","A",  "A","A","B","B","A",  "B","B","A","A","A"),
  EXPERIMENT = c("X","X","X","X","X",  "Y","Y","Y","Y","Y",  "Z","Z","Z","Z","Z"),
  VALUE1     = c( 1,  2,  1,  5,  1,    3,  1,  5,  6,  1,    7,  5,  1,  2,  2),
  VALUE2     = c( 9,  7,  8,  1,  8,    2,  1,  9,  8,  2,    7,  7,  2,  1,  1)
)
I want to use train and split the data according to experiment for cross-validation (here, 3 cross-validation splits).
that is:
Split 1: training = X,Y and validation = Z
Split 2: training = X,Z and validation = Y
Split 3: training = Y,Z and validation = X
How can I do that? With trainControl?
I found an index option in trainControl, but did not understand whether it can do this.
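In caret, the index argument of trainControl is indeed the mechanism for this: it takes a list with one element per resampling iteration, each element being a vector of the row numbers to train on, so three hand-built vectors (the rows of X+Y, of X+Z, and of Y+Z) express exactly these splits. For comparison, here is the same predefined-group idea sketched in Python with scikit-learn's LeaveOneGroupOut (Python for consistency with the other examples on this page; the model choice is arbitrary and only for illustration):

import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import LeaveOneGroupOut, cross_val_score

# same data as the R example above
df = pd.DataFrame({
    "Label":      list("AAABAAABBABBAAA"),
    "EXPERIMENT": list("XXXXXYYYYYZZZZZ"),
    "VALUE1":     [1, 2, 1, 5, 1, 3, 1, 5, 6, 1, 7, 5, 1, 2, 2],
    "VALUE2":     [9, 7, 8, 1, 8, 2, 1, 9, 8, 2, 7, 7, 2, 1, 1],
})

scores = cross_val_score(
    LogisticRegression(),
    df[["VALUE1", "VALUE2"]], df["Label"],
    groups=df["EXPERIMENT"],  # the predefined grouping
    cv=LeaveOneGroupOut(),    # train on two experiments, validate on the third
)
print(scores)                 # one score per held-out experiment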

Find minimum sum

Find the minimum sum of elements, with one element taken from each row. I think the answer is -214, but z3py returns unsat. What is wrong?
from z3 import Solver, Int, ForAll, Or

ARR = [
    [36, 12, 90, 88, 82],
    [-92, 50, 40, 31, 43],
    [81, 28, -26, 8, -59],
    [18, -99, -70, -33, 58],
    [44, -33, 24, -92, -68],
]

s = Solver()
xs = [Int(f"x_{i}") for i, row in enumerate(ARR)]
ys = [Int(f"y_{i}") for i, row in enumerate(ARR)]
for x, y, row in zip(xs, ys, ARR):
    s.add(Or(*[x == val for val in row]))
    s.add(Or(*[y == val for val in row]))
s.add(ForAll(ys, sum(xs) <= sum(ys)))
print(s.check())  # unsat
Your encoding isn't quite correct. If you stick the following line in your program:
print(s.sexpr())
You'll see that it prints, amongst other things:
(assert (forall ((y_0 Int) (y_1 Int) (y_2 Int) (y_3 Int) (y_4 Int))
(<= (+ 0 x_0 x_1 x_2 x_3 x_4) (+ 0 y_0 y_1 y_2 y_3 y_4))))
And this is the reason why it is unsat. The ys bound by the ForAll are fresh, universally quantified variables, so the Or constraints asserted on them earlier do not apply inside the quantifier: the formula demands that sum(xs) <= sum(ys) hold for every possible integer value of y_0 .. y_4, not just for values drawn from the rows. No choice of xs can satisfy that, hence the unsat result.
Instead of this formulation, you should use z3's optimization engine. Pick one variable from each row, add them, and minimize that result. Something like this:
from z3 import *

ARR = [
    [36, 12, 90, 88, 82],
    [-92, 50, 40, 31, 43],
    [81, 28, -26, 8, -59],
    [18, -99, -70, -33, 58],
    [44, -33, 24, -92, -68],
]

o = Optimize()
es = [Int(f"e_{i}") for i, row in enumerate(ARR)]
for e, row in zip(es, ARR):
    o.add(Or(*[e == val for val in row]))

minTotal = Int("minTotal")
o.add(minTotal == sum(es))
o.minimize(minTotal)
print(o.check())
print(o.model())
When I run this, I get:
sat
[e_0 = 12,
e_3 = -99,
e_2 = -59,
e_1 = -92,
e_4 = -92,
minTotal = -330]
That is, the solver picks 12 from the first row, -92 from the second, -59 from the third, -99 from the fourth, and -92 from the last row, for a minimum sum of -330.
It's easy to see that this is the correct solution, since the solver picks the minimum element of each row, and thus their sum is minimal as well. (I'm not sure why you were expecting -214 to be the answer.)
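As an aside, the quantified encoding itself can also be repaired: the ys bound by the ForAll have to be constrained inside the quantifier body, via an implication. A sketch (same ARR as above); it leans on z3's quantifier engine, so the Optimize version remains the more robust and faster choice:

from z3 import Solver, Int, ForAll, Implies, And, Or, sat

s = Solver()
xs = [Int(f"x_{i}") for i in range(len(ARR))]
ys = [Int(f"y_{i}") for i in range(len(ARR))]
for x, row in zip(xs, ARR):
    s.add(Or(*[x == val for val in row]))
# restrict the quantified ys to row values *inside* the ForAll body
ys_in_rows = And(*[Or(*[y == val for val in row]) for y, row in zip(ys, ARR)])
s.add(ForAll(ys, Implies(ys_in_rows, sum(xs) <= sum(ys))))
if s.check() == sat:
    m = s.model()
    print([m[x] for x in xs])  # a minimal pick, summing to -330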

Broadcast Schedule Selection

I have an 11-week game schedule for 11 teams (5 games each week). I need to select from that list 11 games (1 each week) that provide each of the 11 teams with a broadcast of one home and one away game. Ideally this would be code that I could reuse for future years and scale to more teams and weeks if necessary.
I know that the likelihood of finding a viable solution for a given, already-created schedule is extremely low, and in many cases no solution exists. So, when a solution of the type listed above doesn't exist, I would like to get a schedule that comes close. That is, one in which all the teams get two broadcasts, but some teams may get two home or two away games instead of one of each.
I've looked at several different approaches. I have a number of 5x2 (Away Team, Home Team) arrays (weekly matchups) that I've tried to run a sort/selection with conditions on (like a_1 =\= a_j for j>1 and a_i in {1..11}), but I can't figure out how to get the double-restriction selection to work, nor how to make it backtrack to a previous selection when it runs out of viable selections. I've tried to brute-force it, but 40 million possible combinations is more than I can handle.
I'm using MATLAB to perform all the work. I can usually translate C or C++ into usable MATLAB code.
This seemed like a fun problem so I took a crack at formulating it as an IP.
Let J and T be the set of teams and weeks.
Let G be the set of all games; each element of G is a tuple (i,j,t) that indicates the away team (i), the home team (j), and the week (t).
Let H be the set of all home games; each element of H is a tuple (j,t) that indicates the home team (j) and the week (t).
Define the following binary decision variables:
w[j,t] = 1 if we broadcast the home game at j in week t, = 0 otherwise (defined for (j,t) in H)
x[j] = 1 if team j has an away-game broadcast, = 0 otherwise (defined for j in J)
y[j] = 1 if team j has a home-game broadcast, = 0 otherwise (defined for j in J)
z[j] = 1 if team j has both an away-game and a home-game broadcast, = 0 otherwise (defined for j in J)
Then the model is:
maximize sum {j in J} z[j]
subject to sum {j in J} w[j,t] = 1 for all t
x[j] <= sum {(i,t) in H: (j,i,t) in G} w[i,t] for all j
y[j] <= sum {t in T} w[j,t] for all j
z[j] <= (1/2) * (x[j] + y[j]) for all j
w[j,t], x[j], y[j], z[j] in {0,1}
The objective function calculates the total number of teams that get both a home and an away broadcast. The first constraint says we need exactly one broadcast per week. The second constraint says x[j] can't equal 1 unless there is some week when j's away game gets broadcast. The third constraint says the same for y[j] and the home broadcast. The fourth constraint says z[j] can't equal 1 unless both x[j] and y[j] equal 1. The last constraint says everything has to be binary.
I coded this model in Python/PuLP using the 11-team, 11-week schedule below. (Obviously you'd plug in your own schedule.)
from pulp import *
import numpy as np

# Number of teams, weeks, and games per week.
num_teams = 11
num_weeks = 11
num_games_per_week = 5

# Lists of teams and weeks.
teams = range(1, num_teams+1)
weeks = range(1, num_weeks+1)

# List of game tuples: (i, j, t) means team i plays at team j in week t.
games = [(1, 10, 1), (2, 9, 1), (3, 8, 1), (4, 7, 1), (5, 6, 1),
         (6, 4, 2), (7, 3, 2), (8, 2, 2), (9, 1, 2), (10, 11, 2),
         (2, 11, 3), (3, 10, 3), (4, 9, 3), (5, 8, 3), (6, 7, 3),
         (7, 5, 4), (8, 4, 4), (9, 3, 4), (10, 2, 4), (11, 1, 4),
         (3, 1, 5), (4, 11, 5), (5, 10, 5), (6, 9, 5), (7, 8, 5),
         (8, 6, 6), (9, 5, 6), (10, 4, 6), (11, 3, 6), (1, 2, 6),
         (4, 2, 7), (5, 1, 7), (6, 11, 7), (7, 10, 7), (8, 9, 7),
         (9, 7, 8), (10, 6, 8), (11, 5, 8), (1, 4, 8), (2, 3, 8),
         (5, 3, 9), (6, 2, 9), (7, 1, 9), (8, 11, 9), (9, 10, 9),
         (10, 8, 10), (11, 7, 10), (1, 6, 10), (2, 5, 10), (3, 4, 10),
         (11, 9, 11), (1, 8, 11), (2, 7, 11), (3, 6, 11), (4, 5, 11)]

# List of home games: (j, t) means there is a home game at j in week t.
home_games = [(j, t) for (i, j, t) in games]

# Initialize problem.
prob = LpProblem('Broadcast', LpMaximize)

# Generate decision variables.
w = LpVariable.dicts('w', home_games, 0, 1, LpInteger)
x = LpVariable.dicts('x', teams, 0, 1, LpInteger)
y = LpVariable.dicts('y', teams, 0, 1, LpInteger)
z = LpVariable.dicts('z', teams, 0, 1, LpInteger)

# Objective function.
prob += lpSum([z[j] for j in teams])

# Constraint: 1 broadcast per week.
for t in weeks:
    prob += lpSum([w[j, t] for j in teams if (j, t) in home_games]) == 1

# Constraint: x[j] can only = 1 if we broadcast a game in which j is the away team.
for j in teams:
    prob += x[j] <= lpSum([w[i, t] for (i, t) in home_games if (j, i, t) in games])

# Constraint: y[j] can only = 1 if we broadcast a game in which j is the home team.
for j in teams:
    prob += y[j] <= lpSum([w[j, t] for t in weeks if (j, t) in home_games])

# Constraint: z[j] can only = 1 if x[j] and y[j] both = 1.
for j in teams:
    prob += z[j] <= 0.5 * (x[j] + y[j])

# Solve problem.
prob.solve()

# Print status.
print("Status:", LpStatus[prob.status])

# Print optimal values of decision variables.
for v in prob.variables():
    if v.varValue is not None and v.varValue > 0:
        print(v.name, "=", v.varValue)

# Prettier print.
print("\nNumber of teams with both home and away broadcasts: {:.0f}".format(np.sum([z[j].value() for j in teams])))
for (i, j, t) in games:
    if w[j, t].value() == 1:
        print("Week {:2d}: broadcast team {:2d} at team {:2d}".format(t, i, j))
The results are:
Number of teams with both home and away broadcasts: 11
Week 1: broadcast team 1 at team 10
Week 2: broadcast team 10 at team 11
Week 3: broadcast team 5 at team 8
Week 4: broadcast team 8 at team 4
Week 5: broadcast team 6 at team 9
Week 6: broadcast team 11 at team 3
Week 7: broadcast team 4 at team 2
Week 8: broadcast team 9 at team 7
Week 9: broadcast team 7 at team 1
Week 10: broadcast team 2 at team 5
Week 11: broadcast team 3 at team 6
You can see that each team gets both a home and an away broadcast.

Simple Dask Frequency Count

I want to do a frequency count. Imagine this list of people and their age:
In [109]: import dask.bag as db
In [110]: b = db.from_sequence([('alex', 31), ('cassee', 31), ('Wes', 25), ('Allison', 35)])
In [111]: b.map(lambda x: (x[1], 1))\
              .foldby(lambda x: x[0], lambda total, x: total[1] + x[1]).compute()
Out[111]: [(31, 2), (25, (25, 1)), (35, (35, 1))]
The first tuple looks good: (31, 2) means there were 2 occurrences of age 31. However, the format of the next two tuples is weird. I want the output to be the frequency count: [(31, 2), (25, 1), (35, 1)]
The invocation you want is as follows:
b.pluck(1).frequencies().compute()
The pluck does the job of selecting the "age" from each element. frequencies does what the name suggests :)
You could have done this in other ways too:
b.foldby(1, lambda x, y: x + 1, 0).compute()
meaning, use element 1 for grouping, and within each group add 1 to the value so far for each element, starting at 0;
from operator import add
from collections import Counter
b.fold(lambda x, y: x + Counter([y[1]]), add, initial=Counter()).compute()
which is rather complicated to explain...
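Incidentally, the original map/foldby attempt fails for two reasons: with no initial value, a one-element group folds to the raw (age, 1) element itself (which is where the nested tuples come from), and the default combine reuses binop across partitions. A repaired sketch (same bag b; combine merges the per-partition counts):

from operator import add
b.map(lambda x: (x[1], 1))\
 .foldby(lambda x: x[0], lambda total, x: total + x[1], 0, combine=add).compute()
# -> [(31, 2), (25, 1), (35, 1)] (order may vary)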

Best way to convert bit offset to an integer [duplicate]

I have a 64-bit unsigned integer with exactly 1 bit set. I’d like to assign a value to each of the possible 64 values (in this case, the odd primes, so 0x1 corresponds to 3, 0x2 corresponds to 5, …, 0x8000000000000000 corresponds to 313).
It seems like the best way would be to convert 1 → 0, 2 → 1, 4 → 2, 8 → 3, …, 2^63 → 63 and look up the values in an array. But even if that’s so, I’m not sure what the fastest way to get at the binary exponent is. And there may be more efficient ways, still.
This operation will be used 10^14 to 10^16 times, so performance is a serious issue.
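To pin down what is being computed, here is the mapping modeled in deliberately naive Python (the helper and table names are illustrative, not from the question):

# Reference model: a 64-bit value with exactly one bit set maps, via its
# bit index, to the corresponding odd prime (3 for bit 0, ..., 313 for bit 63).
def first_odd_primes(n):
    primes, c = [], 3
    while len(primes) < n:
        if all(c % p != 0 for p in primes):  # trial division by smaller odd primes
            primes.append(c)
        c += 2
    return primes

PRIMES = first_odd_primes(64)                # [3, 5, 7, ..., 313]

def prime_for_bit(v):
    assert v != 0 and v & (v - 1) == 0       # exactly one bit set
    return PRIMES[v.bit_length() - 1]        # bit index = binary exponent

assert prime_for_bit(0x1) == 3
assert prime_for_bit(0x8000000000000000) == 313

The answers below are, in effect, fast ways of computing that bit index (or of folding the index computation and the value table into one lookup).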
Finally an optimal solution. See the end of this section for what to do when the input is guaranteed to have exactly one non-zero bit: http://graphics.stanford.edu/~seander/bithacks.html#IntegerLogDeBruijn
Here's the code:
static const int MultiplyDeBruijnBitPosition2[32] =
{
  0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
  31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9
};
uint32_t v;  /* input: a value with exactly one bit set */
int r;       /* result: the index (0..31) of that bit   */
r = MultiplyDeBruijnBitPosition2[(uint32_t)(v * 0x077CB531U) >> 27];
You may be able to adapt this to a direct multiplication-based algorithm for 64-bit inputs; otherwise, simply add one conditional to see if the bit is in the upper 32 positions or the lower 32 positions, then use the 32-bit algorithm here.
Update: Here's at least one 64-bit version I just developed myself, but it uses division (actually modulo).
r = Table[v%67];
For each power of 2, v%67 has a distinct value, so just put your odd primes (or bit indices if you don't want the odd-prime thing) at the right positions in the table. 3 positions (0, 17, and 34) are not used, which might be convenient if you also want to accept all-bits-zero as an input.
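Both claims are easy to sanity-check (a quick Python check, not part of the original answer):

# every power of two gets its own residue mod 67; 0, 17 and 34 are left over
residues = {(1 << k) % 67 for k in range(64)}
assert len(residues) == 64
assert set(range(67)) - residues == {0, 17, 34}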
Update 2: 64-bit version.
r = Table[(uint64_t)(val * 0x022fdd63cc95386dull) >> 58];
This is my original work, but I got the B(2,6) De Bruijn sequence from this chess site so I can't take credit for anything but figuring out what a De Bruijn sequence is and using Google. ;-)
Some additional remarks on how this works:
The magic number is a B(2,6) De Bruijn sequence. It has the property that, if you look at a 6-consecutive-bit window, you can obtain any six-bit value in that window by rotating the number appropriately, and that each possible six-bit value is obtained by exactly one rotation.
We fix the window in question to be the top 6 bit positions, and choose a De Bruijn sequence with 0's in the top 6 bits. This makes it so we never have to deal with bit rotations, only shifts, since 0's will come into the bottom bits naturally (and we could never end up looking at more than 5 bits from the bottom in the top-6-bits window).
Now, the input value of this function is a power of 2. So multiplying the De Bruijn sequence by the input value performs a bitshift by log2(value) bits. We now have in the upper 6 bits a number which uniquely determines how many bits we shifted by, and can use that as an index into a table to get the actual length of the shift.
This same approach can be used for arbitrarily-large or arbitrarily-small integers, as long as you're willing to implement the multiplication. You simply have to find a B(2,k) De Bruijn sequence where k is the number of bits. The chess wiki link I provided above has De Bruijn sequences for values of k ranging from 1 to 6, and some quick Googling shows there are a few papers on optimal algorithms for generating them in the general case.
If performance is a serious issue, then you should use intrinsics/builtins to use CPU specific instructions, such as the ones found here for GCC:
http://gcc.gnu.org/onlinedocs/gcc-4.5.0/gcc/Other-Builtins.html
Built-in function int __builtin_ffs(unsigned int x).
Returns one plus the index of the least significant 1-bit of x, or if x is zero, returns zero.
Built-in function int __builtin_clz(unsigned int x).
Returns the number of leading 0-bits in x, starting at the most significant bit position. If x is 0, the result is undefined.
Built-in function int __builtin_ctz(unsigned int x).
Returns the number of trailing 0-bits in x, starting at the least significant bit position. If x is 0, the result is undefined.
Things like this are the core of many O(1) algorithms, such as kernel schedulers which need to find the first non-empty queue signified by an array of bits.
Note: I’ve listed the unsigned int versions, but GCC has unsigned long long versions, as well.
You could use a binary search technique:
int pos = 0;
if ((value & 0xffffffff) == 0) {
pos += 32;
value >>= 32;
}
if ((value & 0xffff) == 0) {
pos += 16;
value >>= 16;
}
if ((value & 0xff) == 0) {
pos += 8;
value >>= 8;
}
if ((value & 0xf) == 0) {
pos += 4;
value >>= 4;
}
if ((value & 0x3) == 0) {
pos += 2;
value >>= 2;
}
if ((value & 0x1) == 0) {
pos += 1;
}
This has the advantage over loops that the loop is already unrolled. However, if this is really performance critical, you will want to test and measure every proposed solution.
Some architectures (a surprising number, actually) have a single instruction that can do the calculation you want. On ARM it would be the CLZ (count leading zeroes) instruction. For Intel, the BSF (bit-scan forward) or BSR (bit-scan reverse) instruction would help you out.
I guess this isn't really a C answer, but it will get you the speed you need!
precalculate 1 << i (for i = 0..63) and store them in an array
use a binary search to find the index into the array of a given value
look up the prime number in another array using this index
Compared to the other answer I posted here, this should take only 6 steps to find the index (as opposed to a maximum of 64). But it's not clear to me whether one step of this answer is more time-consuming than just bit shifting and incrementing a counter. You may want to try out both; a sketch of the lookup follows.
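A sketch of those three steps (illustrative names, in Python for consistency with the table-building snippets further down; PRIMES as in the reference sketch near the top):

import bisect

POWERS = [1 << i for i in range(64)]     # step 1: precalculated, already sorted
PRIMES = first_odd_primes(64)            # value table, helper from the reference sketch

def prime_via_binary_search(v):
    i = bisect.bisect_left(POWERS, v)    # step 2: binary search, ~6 probes
    return PRIMES[i]                     # step 3: look up the prime by index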
See http://graphics.stanford.edu/~seander/bithacks.html - specifically "Finding integer log base 2 of an integer (aka the position of the highest bit set)" - for some alternative algorithms. (If you're really serious about speed, you might consider ditching C if your CPU has a dedicated instruction.)
Since speed, presumably not memory usage, is important, here's a crazy idea:
w1 = 1st 16 bits
w2 = 2nd 16 bits
w3 = 3rd 16 bits
w4 = 4th 16 bits
result = array1[w1] + array2[w2] + array3[w3] + array4[w4]
where array1..4 are sparsely populated 64K arrays that contain the actual prime values (and zero in the positions that don't correspond to bit positions)
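Sketched in Python (PRIMES as in the reference sketch near the top; exactly one chunk of a valid input is nonzero, and the zero chunks contribute tables[i][0] == 0):

tables = [[0] * (1 << 16) for _ in range(4)]
for bit in range(64):
    chunk, pos = divmod(bit, 16)          # which 16-bit word the bit falls in
    tables[chunk][1 << pos] = PRIMES[bit]

def prime_via_chunk_tables(v):
    return (tables[0][v & 0xFFFF] + tables[1][(v >> 16) & 0xFFFF] +
            tables[2][(v >> 32) & 0xFFFF] + tables[3][v >> 48])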
R..'s solution is excellent; this is just the 64-bit variant, with the table already calculated ...
static inline unsigned char bit_offset(unsigned long long self) {
static const unsigned char mapping[64] = {
[0]=0, [1]=1, [2]=2, [4]=3, [8]=4, [17]=5, [34]=6, [5]=7,
[11]=8, [23]=9, [47]=10, [31]=11, [63]=12, [62]=13, [61]=14, [59]=15,
[55]=16, [46]=17, [29]=18, [58]=19, [53]=20, [43]=21, [22]=22, [44]=23,
[24]=24, [49]=25, [35]=26, [7]=27, [15]=28, [30]=29, [60]=30, [57]=31,
[51]=32, [38]=33, [12]=34, [25]=35, [50]=36, [36]=37, [9]=38, [18]=39,
[37]=40, [10]=41, [21]=42, [42]=43, [20]=44, [41]=45, [19]=46, [39]=47,
[14]=48, [28]=49, [56]=50, [48]=51, [33]=52, [3]=53, [6]=54, [13]=55,
[27]=56, [54]=57, [45]=58, [26]=59, [52]=60, [40]=61, [16]=62, [32]=63
};
return mapping[((self & -self) * 0x022FDD63CC95386DULL) >> 58];
}
I built the table using the provided mask.
>>> ', '.join('[{0}]={1}'.format(((2**bit * 0x022fdd63cc95386d) % 2**64) >> 58, bit) for bit in xrange(64))
'[0]=0, [1]=1, [2]=2, [4]=3, [8]=4, [17]=5, [34]=6, [5]=7, [11]=8, [23]=9, [47]=10, [31]=11, [63]=12, [62]=13, [61]=14, [59]=15, [55]=16, [46]=17, [29]=18, [58]=19, [53]=20, [43]=21, [22]=22, [44]=23, [24]=24, [49]=25, [35]=26, [7]=27, [15]=28, [30]=29, [60]=30, [57]=31, [51]=32, [38]=33, [12]=34, [25]=35, [50]=36, [36]=37, [9]=38, [18]=39, [37]=40, [10]=41, [21]=42, [42]=43, [20]=44, [41]=45, [19]=46, [39]=47, [14]=48, [28]=49, [56]=50, [48]=51, [33]=52, [3]=53, [6]=54, [13]=55, [27]=56, [54]=57, [45]=58, [26]=59, [52]=60, [40]=61, [16]=62, [32]=63'
should the compiler complain:
>>> ', '.join(map(str, {((2**bit * 0x022fdd63cc95386d) % 2**64) >> 58: bit for bit in xrange(64)}.values()))
'0, 1, 2, 53, 3, 7, 54, 27, 4, 38, 41, 8, 34, 55, 48, 28, 62, 5, 39, 46, 44, 42, 22, 9, 24, 35, 59, 56, 49, 18, 29, 11, 63, 52, 6, 26, 37, 40, 33, 47, 61, 45, 43, 21, 23, 58, 17, 10, 51, 25, 36, 32, 60, 20, 57, 16, 50, 31, 19, 15, 30, 14, 13, 12'
^^^^ this assumes that we iterate over sorted keys, which may not be the case in the future ...
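A variant that does not depend on dict iteration order, indexing by sorted key explicitly (same expression, just deterministic):
>>> d = {((2**bit * 0x022fdd63cc95386d) % 2**64) >> 58: bit for bit in xrange(64)}
>>> ', '.join(str(d[k]) for k in sorted(d))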
unsigned char bit_offset(unsigned long long self) {
static const unsigned char table[64] = {
0, 1, 2, 53, 3, 7, 54, 27, 4, 38, 41, 8, 34, 55, 48,
28, 62, 5, 39, 46, 44, 42, 22, 9, 24, 35, 59, 56, 49,
18, 29, 11, 63, 52, 6, 26, 37, 40, 33, 47, 61, 45, 43,
21, 23, 58, 17, 10, 51, 25, 36, 32, 60, 20, 57, 16, 50,
31, 19, 15, 30, 14, 13, 12
};
return table[((self & -self) * 0x022FDD63CC95386DULL) >> 58];
}
simple test:
>>> table = {((2**bit * 0x022fdd63cc95386d) % 2**64) >> 58: bit for bit in xrange(64)}.values()
>>> assert all(i == table[(2**i * 0x022fdd63cc95386d % 2**64) >> 58] for i in xrange(64))
Short of using assembly or compiler-specific extensions to find the first/last bit that's set, the fastest algorithm is a binary search. First check if any of the first 32 bits are set. If so, check if any of the first 16 are set. If so, check if any of the first 8 are set. Etc. Your function to do this can directly return an odd prime at each leaf of the search, or it can return a bit index which you use as an array index into a table of odd primes.
Here's a loop implementation for the binary search, which the compiler could certainly unroll if that's deemed to be optimal:
uint32_t mask=0xffffffff;
int pos=0, shift=32, i;
for (i=6; i; i--) {
if (!(val&mask)) {
val>>=shift;
pos+=shift;
}
shift>>=1;
mask>>=shift;
}
val is assumed to be uint64_t, but to optimize this for 32-bit machines, you should special-case the first check, then perform the loop with a 32-bit val variable.
Call the GNU POSIX extension function ffsll, found in glibc. If the function isn't present, fall back on __builtin_ffsll. Both functions return the index + 1 of the first bit set, or zero. With Visual-C++, you can use _BitScanForward64.
unsigned bit_position = 0;
while ((value & 1) ==0)
{
++bit_position;
value >>= 1;
}
Then look up the primes based on bit_position as you say.
You may find that log(n) / log(2) gives you the 0, 1, 2, ... you're after in a reasonable timeframe. Otherwise, some form of hashtable based approach could be useful.
Another answer assuming IEEE float:
int get_bit_index(uint64_t val)
{
union { float f; uint32_t i; } u = { val };
return (u.i>>23)-127;
}
It works as specified for the input values you asked for (exactly 1 bit set) and also has useful behavior for other values (try to figure out exactly what that behavior is). No idea if it's fast or slow; that probably depends on your machine and compiler.
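The exponent extraction is easy to check from Python with the struct module (a quick sanity check, not part of the answer; it works because every power of two up to 2**63 is exactly representable as a 32-bit float):

import struct

def get_bit_index(val):
    # value -> float32 -> raw bits -> biased exponent
    (bits,) = struct.unpack('<I', struct.pack('<f', float(val)))
    return (bits >> 23) - 127

assert all(get_bit_index(1 << i) == i for i in range(64))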
From the GnuChess source:
unsigned char leadz (BitBoard b)
/**************************************************************************
*
* Returns the leading bit in a bitboard. Leftmost bit is 0 and
* rightmost bit is 63. Thanks to Robert Hyatt for this algorithm.
*
***************************************************************************/
{
if (b >> 48) return lzArray[b >> 48];
if (b >> 32) return lzArray[b >> 32] + 16;
if (b >> 16) return lzArray[b >> 16] + 32;
return lzArray[b] + 48;
}
Here lzArray is a pregenerated array of size 2^16. This'll save you 50% of the operations compared to a full binary search.
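lzArray itself isn't shown; given the comment's "leftmost bit is 0" convention, it is presumably a 2^16-entry table of 16-bit leading-zero counts, which could be precomputed like this (an assumption, sketched in Python):

# assumed semantics: lzArray[x] = number of leading zeros of x as a 16-bit word
lzArray = [16] + [16 - x.bit_length() for x in range(1, 1 << 16)]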
This is for 32-bit Java, but it should be possible to adapt it to 64 bits.
I assume this will be the fastest, because there is no branching involved.
static public final int msb(int n) {
n |= n >>> 1;
n |= n >>> 2;
n |= n >>> 4;
n |= n >>> 8;
n |= n >>> 16;
n >>>= 1;
n += 1;
return n;
}
static public final int msb_index(int n) {
final int[] multiply_de_bruijn_bit_position = {
0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9
};
return multiply_de_bruijn_bit_position[(msb(n) * 0x077CB531) >>> 27];
}
Here is more information from: http://graphics.stanford.edu/~seander/bithacks.html#ZerosOnRightMultLookup
// Count the consecutive zero bits (trailing) on the right with multiply and lookup
unsigned int v; // find the number of trailing zeros in 32-bit v
int r; // result goes here
static const int MultiplyDeBruijnBitPosition[32] =
{
0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9
};
r = MultiplyDeBruijnBitPosition[((uint32_t)((v & -v) * 0x077CB531U)) >> 27];
// Converting bit vectors to indices of set bits is an example use for this.
// It requires one more operation than the earlier one involving modulus
// division, but the multiply may be faster. The expression (v & -v) extracts
// the least significant 1 bit from v. The constant 0x077CB531UL is a de Bruijn
// sequence, which produces a unique pattern of bits into the high 5 bits for
// each possible bit position that it is multiplied against. When there are no
// bits set, it returns 0. More information can be found by reading the paper
// Using de Bruijn Sequences to Index a 1 in a Computer Word by
// Charles E. Leiserson, Harald Prokop, and Keith H. Randall.
And lastly, the paper itself:
http://supertech.csail.mit.edu/papers/debruijn.pdf
