Skip to content

Commit

Permalink
Performance tests
Browse files Browse the repository at this point in the history
  • Loading branch information
csirmaz committed May 14, 2023
1 parent 0040f91 commit fe02166
Show file tree
Hide file tree
Showing 2 changed files with 43 additions and 21 deletions.
19 changes: 13 additions & 6 deletions csequence.py
Original file line number Diff line number Diff line change
Expand Up @@ -169,7 +169,7 @@ def as_set(self):


@classmethod
def from_set_union(cls, command_sets):
def from_set_union(cls, command_sets, return_length=False):
"""Turn the union of some command sets into a command sequence; the order of commands is not guaranteed
Arguments:
Expand All @@ -191,7 +191,9 @@ def from_set_union(cls, command_sets):
if prev_command is None or (not command.equals(prev_command)):
union.append(command)
prev_command = command
print(f"Union is {len(union)} long for:")

if return_length:
return (CSequence(union), len(union))
return CSequence(union)


Expand Down Expand Up @@ -411,7 +413,7 @@ def set_bit(node, bit):


@classmethod
def get_any_merger(cls, command_sets, decisions=None, debug=False):
def get_any_merger(cls, command_sets, decisions=None, debug=False, return_lengths=False):
"""Given a set of jointly refluent canonical command sets, generate a merger.
Suitable to produce all possible mergers; see Session.get_all_mergers().
Expand Down Expand Up @@ -512,8 +514,12 @@ def mark_delete_destructors_up(command):
continue
break

# from_set_union() applies order_by_node_value()
union = cls.from_set_union(command_sets).add_up_pointers().add_backlinks()
# from_set_union() applies order_by_node_value()
if return_lengths:
union, len_union = cls.from_set_union(command_sets, return_length=True)
else:
union = cls.from_set_union(command_sets)
union = union.add_up_pointers().add_backlinks()

# (0) Initialise flags in top-down order
for command in union.forward():
Expand Down Expand Up @@ -703,5 +709,6 @@ def process_node_commands_4(commands):
for i, d in enumerate(decisions):
print(f" #{i} {d['current_decision']} of {d['num_options']} for {d['comment']}")

print(f" merger_size={len(merger)}")
if return_lengths:
return (decisions, CSequence(merger), {'union': len_union, 'merger': len(merger)})
return (decisions, CSequence(merger))
45 changes: 30 additions & 15 deletions test_on_set.py
Original file line number Diff line number Diff line change
Expand Up @@ -139,16 +139,25 @@ def generate_sequences(self, num_users):
})


num_experiments = 10

# CSV header
csv_hdr = ([
'spread', 'size', 'num_users', 'num_mergers', 'max_nodes', 'num_nodes', 'seq_len', 'union_len', 'merger_len',
'avg_time', 'mse', 'merger_time', 'command_time'
] + [f"t{i}" for i in range(num_experiments)])
print(",".join(csv_hdr))

experiments = {}
for experiment in range(10):
for experiment in range(num_experiments):
random.shuffle(settings)
for setting in settings:
for set_ix, setting in enumerate(settings):
spread = setting['spread']
size = setting['size']
num_users = setting['num_users']
num_mergers = setting['num_mergers']

max_size = size*(2*spread+1) + size*(2*spread+1)*(2*spread+1) + size*(2*spread+1)*(2*spread+1)*size
max_nodes = size*(2*spread+1) + size*(2*spread+1)*(2*spread+1) + size*(2*spread+1)*(2*spread+1)*size

if False and experiment == 0:
test1 = Test(size=size, spread=spread, num_users=num_users)
Expand All @@ -165,7 +174,7 @@ def generate_sequences(self, num_users):
test = Test(size=size, spread=spread, num_users=num_users) # reset flags, etc.
gc.collect()
start = timer()
decisions, merger = CSequence.get_any_merger(test.sequences, decisions=decisions, debug=False)
decisions, merger, lengths = CSequence.get_any_merger(test.sequences, decisions=decisions, debug=False, return_lengths=True)
end = timer()
time_spent = end - start
if decisions is None: # no more mergers
Expand All @@ -175,15 +184,21 @@ def generate_sequences(self, num_users):
break

if i == num_mergers: # We have enough mergers
key = f"spread={spread} size={size} users={num_users} mergers={num_mergers} -> max_nodes={max_size} num_nodes={test.num_nodes} seq_len={test.sequence_length}"
print(f"{key} #{experiment} -> {time_spent}s")
exp_data = [spread, size, num_users, num_mergers, max_nodes, test.num_nodes, test.sequence_length, lengths['union'], lengths['merger']]
key = ":".join([str(x) for x in exp_data])
if key not in experiments:
experiments[key] = []
experiments[key].append(time_spent)

time.sleep(30)

print("----- AGGREGATING ------")
for k, times in experiments.items():
avg = sum(times)/len(times)
print(f"{k} -> times: " + " ".join([str(t) for t in times]) + f" average: {avg} s")
experiments[key] = {'data': exp_data, 'times':[]}
print(f"PROGRESS {(experiment*len(settings) + set_ix)/(num_experiments*len(settings))*100}%")
experiments[key]['times'].append(time_spent)

if experiment < num_experiments - 1:
time.sleep(30)

# CSV lines
import numpy as np
for k, exp_data in experiments.items():
times = np.array(exp_data['times'])
avg = np.average(times)
mse = np.sum(np.square(times - avg))
csv = exp_data['data'] + [avg, mse, avg/exp_data['data'][3], avg/exp_data['data'][3]/exp_data['data'][7]] + exp_data['times']
print(",".join([str(x) for x in csv]))

0 comments on commit fe02166

Please sign in to comment.