Commit 4a353be

Encoding Forward pass of a Neural Network in Z3
1 parent 03c3400 commit 4a353be

File tree

1 file changed

Code Examples/Z3 Examples/z3_Neural_Networks.py

Lines changed: 52 additions & 17 deletions
# Encoding of a Neural Network in Z3
# A custom loss function.
# A Forward pass over one hidden layer.

from z3 import *
import numpy as np

if __name__ == "__main__":

    MIDDLE_LAYER_OUT = 6
    # Z3 Solver instance.
    S = z3.Solver()

    # Inputs to the neural network, loss and intermediate sumVal.
    inputs = [z3.Real(f"{i}_l1") for i in ["x_input","y_input","n_input","a_input"]]
    sumVal = z3.Real("sumVal")
    loss = z3.Real("loss")

    # First Fully Connected Layer: weights, biases and outputs.
    layer_1_weights = [[z3.Real(f"weight_l1_{i}_{j}")
                        for j in range(MIDDLE_LAYER_OUT)]
                       for i in range(4)]

    layer_1_bias = [z3.Real(f"bias_l1_{i}") for i in range(MIDDLE_LAYER_OUT)]
    outputs1 = [z3.Real(f"activation_{i}") for i in range(MIDDLE_LAYER_OUT)]

    # Second Fully Connected Layer: weights, biases and outputs.
    layer_2_weights = [[z3.Real(f"weight_l2_{i}_{j}")
                        for j in range(4)]
                       for i in range(MIDDLE_LAYER_OUT)]

    layer_2_bias = [z3.Real(f"bias_l2_{i}") for i in range(4)]
    outputs = [z3.Real(f"{i}_l1") for i in ["C_0","C_1","C_2","C_3"]]

    # Convert to np.matrix, np.array
    # just to make doing the math easier.
    weights1 = np.matrix(layer_1_weights)
    bias1 = np.matrix(layer_1_bias)
    weights2 = np.matrix(layer_2_weights)
    bias2 = np.matrix(layer_2_bias)
    inp = np.matrix(inputs)

    # Single Forward pass on the first layer.
    layer_1_out = weights1.T * inp.T + bias1.T
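    # (Shape note: weights1 is 4 x MIDDLE_LAYER_OUT, so weights1.T * inp.T
    # is a (6 x 4) * (4 x 1) = 6 x 1 column, matching bias1.T.)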

    # Apply ReLU() to output of layer-1.
    for index, elems in enumerate(layer_1_out):
        rhs = elems.tolist()[0][0]
        lhs = outputs1[index]
        # ReLU Activation
        S.add(z3.If(rhs >= 0, lhs == rhs, lhs == 0))
        if index == 0:
            print(z3.If(rhs >= 0, lhs == rhs, lhs == 0))

    # Single Forward pass on the second layer.
    # Use the activations from the first layer.
    activation1 = np.matrix(outputs1)
    layer_2_out = activation1 * weights2 + bias2

    # Apply ReLU() to output of layer-2.
    # layer_2_out is a 1x4 row matrix, so iterate over its
    # transpose to visit each of the four output neurons.
    for index, elems in enumerate(layer_2_out.T):
        rhs = elems.tolist()[0][0]
        lhs = outputs[index]
        # ReLU Activation
        S.add(z3.If(rhs >= 0, lhs == rhs, lhs == 0))
        if index == 0:
            print(z3.If(rhs >= 0, lhs == rhs, lhs == 0))

    # Dot product of coefficients.
    # This is the case for the specific loss function
    # we are trying to use here.
    sumVal = z3.Sum([(inputs[index] * outputs[index]) for index in range(4)])
    print(sumVal)
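    # (Note: inputs and outputs are both symbolic, so this dot product
    # is a quadratic, i.e. nonlinear, term over the reals.)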

    # Put some restrictions on what the values for
    # the weights can be for both the layers.
    weights_constraint = []
    [[weights_constraint.append(j > 2) for j in i] for i in layer_1_weights]
    S.add(z3.And(weights_constraint))

    weights_constraint = []
    [[weights_constraint.append(j > 3) for j in i] for i in layer_2_weights]
    S.add(z3.And(weights_constraint))

    # Put some restrictions on what the values for
    # the biases can be for both the layers.
    [S.add(elem > 1) for elem in layer_1_bias]
    [S.add(elem > 7) for elem in layer_2_bias]

    # Specify the range of values that
    # can be passed as input to the neural network.
    [S.add(j > 0) for j in inputs]
    [S.add(j <= 1024) for j in inputs]

    # We want to enforce that none of the
    # last layer neurons give zero output.
    # This is a requirement for this
    # specific problem.
    [S.add(j != 0) for j in outputs]

    # Final loss function that uses inputs and the sumVal computed above.
    S.add(loss == sumVal ** 2 + 10000 * z3.If(inputs[1] - inputs[-1] >= 0, inputs[1] - inputs[-1], 0))
    S.add(z3.ForAll([x for x in inputs], (sumVal ** 2 + 10000 * z3.If(inputs[1] - inputs[-1] >= 0, inputs[1] - inputs[-1], 0)) == 0))
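    # (Note: the ForAll requires the loss expression to vanish for every
    # value of the four inputs, not just for one satisfying assignment.)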

    # Write the query to a file and check
    # it with z3:
    # z3 -v:5 -st -smt2 ...
    with open("query.smt2", mode="w") as smt_writer:
        smt_writer.write(S.to_smt2())

    # Uncomment if needed.
    # # print(S.assertions())
    # print(S.check())
    # print(S.model())

    # A possible answer after 210 hours.
    # [weight_l1_3_3 = 131073/65536,
    # weight_l2_2_2 = 4,
    # n_input_l1 = 15,
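
A note on the ReLU encoding used in both loops above: ReLU is piecewise linear, so lhs == max(rhs, 0) is expressed exactly over the reals by a single z3.If per neuron, with no approximation. Below is a minimal, self-contained sketch of the same pattern on a single neuron; the names and concrete values are illustrative and not part of the commit.

    # Minimal sketch: y = ReLU(w*x + b) for one neuron, same z3.If pattern.
    from z3 import Real, If, Solver, sat

    s = Solver()
    x, w, b, y = Real("x"), Real("w"), Real("b"), Real("y")
    pre = w * x + b                        # pre-activation
    s.add(If(pre >= 0, y == pre, y == 0))  # y = max(pre, 0)
    s.add(x == 2, w == -3, b == 1)         # pre-activation evaluates to -5
    if s.check() == sat:
        print(s.model()[y])                # ReLU clamps the output to 0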

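With print(S.check()) and print(S.model()) commented out, the intended workflow is to solve the exported query.smt2 offline, e.g. z3 -v:5 -st -smt2 query.smt2 as the comment in the file suggests. A rough sketch of the in-process alternative, re-loading the exported file with a time budget (the 60-second timeout is an illustrative assumption):

    # Sketch: re-load and check the exported query with a time limit.
    from z3 import Solver, parse_smt2_file, sat

    s = Solver()
    s.set("timeout", 60000)               # milliseconds; illustrative budget
    s.add(parse_smt2_file("query.smt2"))  # constraints written by S.to_smt2()
    result = s.check()
    print(result)                         # sat / unsat / unknown (e.g. on timeout)
    if result == sat:
        print(s.model())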
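
The "possible answer" block at the end of the file is a Z3 model printed as name = value pairs; Z3 reports Real values as exact rationals, which is why 131073/65536 appears instead of a float. A short sketch of reading such values out of a model, assuming S and the declarations above are in scope:

    # Sketch: extracting concrete values from a model.
    if S.check() == z3.sat:
        m = S.model()
        for d in m.decls():                # every constant the model assigns
            print(f"{d.name()} = {m[d]}")  # e.g. weight_l1_3_3 = 131073/65536
        # Expressions can also be evaluated under the model:
        print(m.eval(inputs[0] * outputs[0]))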