# probabilities/mutations22.py
#
# Greedy search over single-bit flips: for each candidate flip, compare the
# observed pairwise incoherence of the samples against the expected
# distributions and pick the flip most likely to move the state one order
# closer to a solution (see the notes above compute_distributions()).
from math import isnan
import numpy as np
import random
import hashlib
import math


def get_state_id(state):
    return ','.join([str(x) for x in sorted(state)])


class Point:
    # A sample: input bit vector x and its label y.
    def __init__(self, x, y):
        self.x = x
        self.y = y

    def id(self):
        return ','.join([str(int(x)) for x in self.x])


class Influence:
    # A pair of samples together with the bit positions (degrees of
    # freedom) on which their inputs differ.
    def __init__(self, a, b):
        self.a = a
        self.b = b
        self.original_dof = set()
        self.dof = set()
        for i in range(0, len(a.x)):
            if a.x[i] != b.x[i]:
                self.original_dof.add(i)
                self.dof.add(i)

    def coherent(self):
        return self.a.y == self.b.y

    def id(self):
        return ','.join(sorted([self.a.id(), self.b.id()]))


def encode(v):
    # Pack a 0/1 vector into bytes, most significant bit first; the bits
    # of a trailing partial byte occupy the low end of the last byte.
    byte_values = []
    for i in range(0, math.ceil(len(v) / 8)):
        x = 0
        for j in range(0, 8):
            index = i * 8 + j
            if index >= len(v):
                break
            x <<= 1
            x |= int(v[index])
        byte_values.append(x)
    return bytearray(byte_values)


def decode(x, N):
    # Unpack an integer into an N-element 0/1 vector, least significant
    # bit first.
    index = 0
    output = np.zeros((N))
    while x > 0 and index < N:
        output[index] = x & 0b1
        x >>= 1
        index += 1
    return output
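

# A small sanity check (hypothetical helper, not part of the original flow):
# encode packs MSB-first into bytes while decode unpacks an integer
# LSB-first, so the two are independent helpers rather than exact inverses.
def _demo_codec():
    assert encode([1, 0, 1]) == bytearray([0b101])
    assert list(decode(0b101, 3)) == [1.0, 0.0, 1.0]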


def sha(v):
    # Hash the packed bit vector with SHA-256 and return the lowest bit of
    # the first digest byte.
    x = encode(v)
    m = hashlib.sha256()
    m.update(x)
    result = m.digest()
    return result[0] & 0b1


def hamming_distance(a, b):
    return np.sum(np.logical_xor(a.x, b.x))


def random_x(N):
    x = np.zeros((N))
    for i in range(0, N):
        x[i] = random.randint(0, 1)
    return x


def xor(x):
    # return sum(x[:4]) % 2
    return sum(x) % 2


def create_dof_map(influences):
    dof_map = {}
    for influence in influences:
        for i in influence.dof:
            if i not in dof_map:
                dof_map[i] = []
            dof_map[i].append(influence)
    return dof_map


def flip(influences, i):
    for influence in influences:
        if i in influence.dof:
            influence.a.y = int(influence.a.y) ^ 1


def remove_dof(dof_map, i, flip=False):
    for influence in dof_map[i]:
        influence.dof.remove(i)
        if flip:
            influence.a.y = int(influence.a.y) ^ 1
        # if len(influence.dof) == 0 and not influence.coherent():
        #     raise Exception('Invalid')
    del dof_map[i]
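

# Hypothetical usage sketch of the influence machinery above: two samples
# that differ in bits 0 and 2 yield one Influence with those degrees of
# freedom, and removing a dof with flip=True toggles the label of the
# first point.
def _demo_influences():
    a = Point(np.array([0, 1, 0]), 0)
    b = Point(np.array([1, 1, 1]), 1)
    influence = Influence(a, b)
    assert influence.dof == {0, 2} and not influence.coherent()
    dof_map = create_dof_map([influence])
    remove_dof(dof_map, 0, flip=True)  # drop bit 0, flipping a.y to 1
    assert influence.dof == {2} and influence.coherent()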


def solve(dof_map, all_influences, all_samples):
    # Experimental solver: repeatedly eliminate influences that are down to
    # a single degree of freedom, then score candidate flips on the
    # incoherent influence with the fewest dofs. The scoring pass only
    # prints diagnostics; selecting and applying the winning flip was never
    # wired up, so the function always returns None.
    eliminated = True
    while eliminated:
        eliminated = False
        for influence in all_influences:
            if len(influence.dof) == 1:
                i = next(iter(influence.dof))
                if influence.coherent():
                    remove_dof(dof_map, i)
                    eliminated = True
                else:
                    print('Forced', i)
                    remove_dof(dof_map, i, True)
                    eliminated = True
    lowest_dof = None
    for influence in all_influences:
        if not influence.coherent() and len(influence.dof) > 1:
            if lowest_dof is None or len(influence.dof) < len(lowest_dof.dof):
                lowest_dof = influence
    if lowest_dof is None:
        return None
    for i in lowest_dof.dof:
        per_point_scores = {}
        i_influences = dof_map[i]
        left = 0
        right = 0
        for influence in i_influences:
            if influence.a not in per_point_scores:
                per_point_scores[influence.a] = [0, 0]
            if influence.b not in per_point_scores:
                per_point_scores[influence.b] = [0, 0]
            if influence.coherent():
                per_point_scores[influence.a][0] += 1
                per_point_scores[influence.b][0] += 1
                left += 1
            else:
                per_point_scores[influence.a][1] += 1
                per_point_scores[influence.b][1] += 1
                right += 1
        print(i, left / (left + right))
        num = 0
        denom = 0
        for _, score in per_point_scores.items():
            if score[0] == score[1]:
                continue
            print(i, score)
            num += score[1] / (score[0] + score[1])
            denom += 1
        score = num / denom if denom > 0 else 0
        print(score)
    return None
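

# Hypothetical driver for solve() on a small exhaustive instance. solve()
# is experimental scratch code: expect a stream of diagnostics and a None
# return value rather than a finished answer.
def _demo_solve(N=4):
    xs = [decode(i, N) for i in range(2 ** N)]
    points = [Point(x, int(xor(x))) for x in xs]
    influences = [Influence(a, b) for idx, a in enumerate(points) for b in points[idx + 1:]]
    solve(create_dof_map(influences), influences, points)
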
# 1st row: (n+1 choose k+1) * (1 - (k mod 2))
# pseudo-Pascal recurrence to compute the follow-on rows
# assuming solvability, we want to maximize the probability that our current
# state and our state with a particular single flip are one order apart in
# the correct direction
# 2, 0
# 2, 2, 0
# 2, 4, 2, 0
# 2, 6, 6, 2, 0
# 2, 8,12, 8, 2, 0
# 2,10,20,20,10, 2, 0
# 3,-9,19,-33,51,-73,99
# 3,-6,10,-14,18,-22,26
# 3,-3, 4, -4, 4, -4, 4
# 3, 0, 1, 0, 0, 0, 0
# 3, 3, 1, 1, 0, 0, 0
# 3, 6, 4, 2, 1, 0, 0
# 3, 9,10, 6, 3, 1, 0
# 4, 0, 4, 0
# 4, 4, 4, 4, 0
# 4, 8, 8, 8, 4, 0
# 4,12,16,16,12, 4, 0
# 5, 0,10, 0, 1
# 5, 5,10,10, 1, 1
# 5,
# 5,
# 3
#
# @1 [1, 2, 1]
# @2 [2, 2, 0]
# @3 [3, 0, 1]
# 5 [5, 10, 10, 5, 1] (5 choose 1, 5 choose 2, ...)
#
# @1 [1, 4, 6, 4, 1], [4, 6, 4, 1, 0] - 16, 15 - binomial (4 choose 0, 4 choose 1, 4 choose 2),
# @2 [2, 6, 6, 2, 0], [3, 4, 4, 3, 1] - 16, 15 - (4 choose 1) + (2 choose -1) - (2 choose 1)
# @3 [3, 6, 4, 2, 1], [2, 4, 6, 3, 0] - 16, 15 - (4 choose 2) + (2 choose -2) - (2 choose 2) + (2 choose -1) - (2 choose 1)
# @4 [4, 4, 4, 4, 0], [1, 6, 6, 1, 1] - 16, 15 -
# @5 [5, 0, 10, 0, 1], [0, 10, 0, 5, 0] - 16, 15 -
# @0 [0.0, 0.0, 0.0, 0.0, 0.0]
# @1 [0.2, 0.4, 0.6, 0.8, 1.0]
# @2 [0.4, 0.6, 0.6, 0.4, 0.0]
# @3 [0.6, 0.6, 0.4, 0.4, 1.0]
# @4 [0.8, 0.4, 0.4, 0.8, 0.0]
# @5 [1.0, 0.0, 1.0, 0.0, 1.0]
# 6
#
# @1 [1, 5, 10, 10, 5, 1]
# @2 [2, 8, 12, 8, 2, 0]
# @3 [3, 9, 10, 6, 3, 1]
# @4 [4, 8, 8, 8, 4, 0]
# @5 [5, 5, 10, 10, 1, 1]
# @6 [6, 0, 20, 0, 6, 0]
# last row, 1 if odd, 0 if even
# second to last, subtract 2 on odds, add 2 on evens
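

# Illustrative sketch (hypothetical helper): the in-place prefix-add below is
# the same pseudo-Pascal step compute_distributions() uses, and starting from
# [2, 0, ...] it reproduces the '2, ...' rows tabulated above.
def _demo_pseudopascal(rows=5):
    row = np.zeros(rows + 2)
    row[0] = 2
    for k in range(2, rows + 2):
        for j in reversed(range(0, k)):
            row[j + 1] += row[j]
        print(row)  # [2, 2, 0, ...], [2, 4, 2, 0, ...], [2, 6, 6, 2, 0, ...], ...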


def compute_distributions(N):
    # Row 0 is binomial (N-1 choose j); row i is seeded with
    # (i+1 choose j+1) zeroed on odd j and extended with the pseudo-Pascal
    # prefix-add; every column j is finally normalized by (N choose j+1).
    dist = np.zeros((N, N))
    for j in range(0, N):
        dist[0][j] = math.comb(N - 1, j)
        dist[-1][j] = math.comb(N, j + 1) * (1 - (j % 2))
    for i in range(1, N):
        for j in range(0, i + 1):
            dist[i][j] = math.comb(i + 1, j + 1) * (1 - (j % 2))
        for k in range(i + 1, N):
            for j in reversed(range(0, k)):
                dist[i][j + 1] = dist[i][j] + dist[i][j + 1]
    print(dist)
    for i in range(0, N):
        for j in range(0, N):
            denom = math.comb(N, j + 1)
            dist[i][j] /= denom
    return dist
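

# Sanity check (hypothetical helper): for N = 5 the normalized rows should
# reproduce the @1..@5 table hand-computed in the comments above. Note that
# compute_distributions() prints the unnormalized matrix as a side effect.
def _check_distributions():
    expected = np.array([
        [0.2, 0.4, 0.6, 0.8, 1.0],
        [0.4, 0.6, 0.6, 0.4, 0.0],
        [0.6, 0.6, 0.4, 0.4, 1.0],
        [0.8, 0.4, 0.4, 0.8, 0.0],
        [1.0, 0.0, 1.0, 0.0, 1.0],
    ])
    assert np.allclose(compute_distributions(5), expected)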


def raised_cosine(x, u, s):
    # Raised-cosine density centered at u with half-width s: zero outside
    # [u - s, u + s], peaking at 1/s when x == u.
    if x < (u - s):
        return 0
    if x > (u + s):
        return 0
    return 1.0 / (2.0 * s) * (1 + math.cos(math.pi * (x - u) / s))


def average_index(x):
    # Mean index of a (possibly unnormalized) distribution over indices.
    total = 0
    for k in range(0, len(x)):
        total += k * x[k]
    return total / np.sum(x)
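

# Quick checks (hypothetical helper): the raised-cosine window peaks at 1/s
# at its center, vanishes outside [u - s, u + s], and average_index is the
# normalized mean index.
def _demo_density_helpers():
    assert math.isclose(raised_cosine(0.5, 0.5, 0.1), 1.0 / 0.1)
    assert raised_cosine(0.75, 0.5, 0.1) == 0
    assert average_index(np.array([0, 1, 1])) == 1.5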


# N, 2^(N/2 + 1):
# 8, 32, 2^5
# 10, 64, 2^6
# 12, 128, 2^7
# 14, 256, 2^8
# 16, 512, 2^9
# 18, 1024, 2^10
# 20, 2048, 2^11
# 22, 4096, 2^12


def main():
    N = 16
    sample_size = 128
    sample_ids = set()
    samples = []
    dist = compute_distributions(N)
    print(dist)
    # Draw random labeled samples, de-duplicated by id.
    for i in range(0, sample_size):
        x = random_x(N)
        y = int(xor(x))
        p = Point(x, y)
        p_id = p.id()
        if p_id in sample_ids:
            continue
        sample_ids.add(p_id)
        samples.append(p)
    total_sample_count = len(samples)
    # Exhaustive alternative:
    # for i in range(0, 2**N):
    #     x = decode(i, N)
    #     y = int(xor(x))
    #     samples.append(Point(x, y))
    base = np.zeros(N)
    current = np.zeros(N)
    cumulative_probability = np.ones(N)
    for _ in range(0, N):
        best_delta = -1
        use_flip = -1
        # flip == -1 is the no-flip baseline; flip >= 0 tests flipping bit `flip`.
        for flip in range(-1, N):
            coherent_distances = np.zeros(N + 1)
            incoherent_distances = np.zeros(N + 1)
            all_coherent = True
            for i in range(0, len(samples)):
                a = samples[i]
                for j in range(0, len(samples)):
                    b = samples[j]
                    distance = hamming_distance(a, b)
                    # Coherent: labels agree iff the candidate flip leaves the
                    # pair's relationship unchanged.
                    is_coherent = (((flip < 0 or a.x[flip] == b.x[flip]) and a.y == b.y)
                                   or ((flip >= 0 and a.x[flip] != b.x[flip]) and a.y != b.y))
                    if is_coherent:
                        coherent_distances[distance] += 1
                    else:
                        incoherent_distances[distance] += 1
                        all_coherent = False
            if all_coherent:
                print('Flip and halt', flip)
                return
            # print(coherent_distances, incoherent_distances)
            est_incoherence = np.divide(incoherent_distances, np.add(coherent_distances, incoherent_distances))
            # print(est_incoherence)
            for k in range(0, N):
                known_incoherence_at_k = dist[k]
                err = 0
                # denom = 0
                probability = 1.0
                for i in range(1, N + 1):
                    if isnan(est_incoherence[i]):
                        continue
                    pair_count = coherent_distances[i] + incoherent_distances[i]
                    full_size = math.comb(N, i) * (2 ** N)
                    num_unknowns = full_size - pair_count
                    min_true_value = incoherent_distances[i] / full_size
                    max_true_value = (incoherent_distances[i] + num_unknowns) / full_size
                    s = max(abs(est_incoherence[i] - min_true_value), abs(est_incoherence[i] - max_true_value))
                    u = est_incoherence[i]
                    known_incoherence = known_incoherence_at_k[i - 1]
                    err = raised_cosine(known_incoherence, u, s)
                    probability *= err
                    # print(k, i, min_true_value, max_true_value)
                    # confidence = (coherent_distances[i] + incoherent_distances[i]) / math.comb(N, i)  # probability that the sample is representative
                    # err += abs(est_incoherence[i] - known_incoherence_at_k[i-1]) * confidence
                    # denom += 1
                # print(flip, k, err)
                # err /= denom
                if flip < 0:
                    base[k] = probability
                else:
                    current[k] = probability
            if flip >= 0:
                if np.sum(current) == 0:
                    continue
                np.divide(current, np.sum(current), current)
                # print(current)
                # temp = np.roll(cumulative_probability, -1)
                # temp[-1] = 1.0
                # np.multiply(current, temp, current)
                # np.divide(current, np.sum(current), current)
                # Mass assigned to moving one order closer (forward) versus
                # one order further away (backward).
                p_forward = 0
                p_backward = 0
                for i in range(1, N):
                    p_forward += cumulative_probability[i] * current[i - 1]
                for i in range(0, N - 1):
                    p_backward += cumulative_probability[i] * current[i + 1]
                # base_index = average_index(cumulative_probability)
                # new_index = average_index(current)
                # if isnan(new_index):
                #     continue
                # np.divide(current, np.sum(current), current)
                # np.subtract(1, current, current)
                print(flip, p_forward, p_backward, current)
                delta = p_forward - p_backward
                if use_flip < 0 or delta > best_delta:
                    use_flip = flip
                    best_delta = delta
                # for k in range(0, N - 1):
                #     value = current[k] * cumulative_probability[k + 1]
                #     if use_flip < 0 or value > best_delta:
                #         use_flip = flip
                #         best_delta = value
                # print(flip, highest_value)
            else:
                np.divide(base, np.sum(base), base)
                # np.subtract(1, base, base)
                # print(cumulative_probability)
                # Shift the prior down one order and fold in the baseline.
                cumulative_probability = np.roll(cumulative_probability, -1)
                cumulative_probability[-1] = 1.0
                # print(cumulative_probability)
                # print(base)
                np.multiply(base, cumulative_probability, cumulative_probability)
                np.divide(cumulative_probability, np.sum(cumulative_probability), cumulative_probability)
                print(cumulative_probability)
        if use_flip < 0:
            return
        print('Flip', use_flip, best_delta)
        # Apply the chosen flip: toggle labels of samples whose bit is set.
        for p in samples:
            if p.x[use_flip]:
                p.y ^= 1
if __name__ == "__main__":
main()