I have a simple neural network being trained; the train function should return the updated list of weights and the bias, but it returns exactly the values that were initialized before the loop, even though the prints earlier in the run show everything changing. I also have similar code that uses numpy.array instead of list. Why does this happen if I am mutating the variables?
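I know lists are mutable and in-place changes are visible through every reference to the list; a quick standalone check (my own minimal test, the names bump and ws are mine) behaves as expected:

def bump(ws: list) -> list:
    ws[0] += 1  # mutate the caller's list in place
    return ws

w = [0.5, 0.5]
print(bump(w))  # [1.5, 0.5]
print(w)        # [1.5, 0.5] -- the original list changed too

The full training code: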
from random import uniform

data = [
    [1, 0, 0],
    [0, 1, 0],
    [0, 0, 1],
    [0, 0, 1],
    [0, 1, 0],
    [1, 0, 0],
    [1, 0, 0],
    [0, 1, 0],
    [0, 0, 1],
    [0, 0, 1],
    [0, 1, 0],
    [1, 0, 0],
]
response = [0, 1, 2, 2, 1, 0, 0, 1, 2, 2, 1, 0]

hardlim = lambda s: 1 if s >= 0 else 0  # defined but never used below

def net(x: list, w: list, bias: float) -> float:
    # weighted sum of inputs plus bias; no activation is applied
    assert len(x) == len(w)
    return sum(w[i] * x[i] for i in range(len(x))) + bias

def train(data: list, current: list, epoch: int = 10):
    w = [uniform(0, 1) for _ in range(3)]  # random initial weights
    print(w)
    bias = 0
    for _ in range(epoch):
        for i in range(len(data)):
            res = net(data[i], w, bias)
            err = current[i] - res  # raw error, no thresholding
            # w = [w[j] + (err * data[i][j]) for j in range(len(w))]
            w[0] += err * data[i][0]
            w[1] += err * data[i][1]
            w[2] += err * data[i][2]
            bias += err
            print(w, bias)
    return w, bias

W, bias = train(data, response, 3)
print(W, bias)
Output:
[0.21010613206587636, 0.9633637928027444, 0.41479921119301144]
[0.0, 0.9633637928027444, 0.41479921119301144] -0.21010613206587636
[0.0, 1.2101061320658764, 0.41479921119301144] 0.03663620719725558
[0.0, 1.2101061320658764, 1.9633637928027445] 1.5852007888069886
[0.0, 1.2101061320658764, 0.41479921119301144] 0.036636207197255466
[0.0, 0.9633637928027445, 0.41479921119301144] -0.21010613206587636
[0.21010613206587636, 0.9633637928027445, 0.41479921119301144] 0.0
[0.0, 0.9633637928027445, 0.41479921119301144] -0.21010613206587636
[0.0, 1.2101061320658764, 0.41479921119301144] 0.036636207197255466
[0.0, 1.2101061320658764, 1.9633637928027445] 1.5852007888069886
[0.0, 1.2101061320658764, 0.41479921119301144] 0.036636207197255466
[0.0, 0.9633637928027445, 0.41479921119301144] -0.21010613206587636
[0.21010613206587636, 0.9633637928027445, 0.41479921119301144] 0.0
[0.0, 0.9633637928027445, 0.41479921119301144] -0.21010613206587636
[0.0, 1.2101061320658764, 0.41479921119301144] 0.036636207197255466
[0.0, 1.2101061320658764, 1.9633637928027445] 1.5852007888069886
[0.0, 1.2101061320658764, 0.41479921119301144] 0.036636207197255466
[0.0, 0.9633637928027445, 0.41479921119301144] -0.21010613206587636
[0.21010613206587636, 0.9633637928027445, 0.41479921119301144] 0.0
[0.0, 0.9633637928027445, 0.41479921119301144] -0.21010613206587636
[0.0, 1.2101061320658764, 0.41479921119301144] 0.036636207197255466
[0.0, 1.2101061320658764, 1.9633637928027445] 1.5852007888069886
[0.0, 1.2101061320658764, 0.41479921119301144] 0.036636207197255466
[0.0, 0.9633637928027445, 0.41479921119301144] -0.21010613206587636
[0.21010613206587636, 0.9633637928027445, 0.41479921119301144] 0.0
[0.0, 0.9633637928027445, 0.41479921119301144] -0.21010613206587636
[0.0, 1.2101061320658764, 0.41479921119301144] 0.036636207197255466
[0.0, 1.2101061320658764, 1.9633637928027445] 1.5852007888069886
[0.0, 1.2101061320658764, 0.41479921119301144] 0.036636207197255466
[0.0, 0.9633637928027445, 0.41479921119301144] -0.21010613206587636
[0.21010613206587636, 0.9633637928027445, 0.41479921119301144] 0.0
[0.0, 0.9633637928027445, 0.41479921119301144] -0.21010613206587636
[0.0, 1.2101061320658764, 0.41479921119301144] 0.036636207197255466
[0.0, 1.2101061320658764, 1.9633637928027445] 1.5852007888069886
[0.0, 1.2101061320658764, 0.41479921119301144] 0.036636207197255466
[0.0, 0.9633637928027445, 0.41479921119301144] -0.21010613206587636
[0.21010613206587636, 0.9633637928027445, 0.41479921119301144] 0.0
[0.21010613206587636, 0.9633637928027445, 0.41479921119301144] 0.0
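To double-check, I replayed the very first update by hand (a tiny sketch of mine; the numbers are copied from the printout above):

w0, bias = 0.21010613206587636, 0
res = w0 * 1 + bias   # net() for the first sample [1, 0, 0]
err = 0 - res         # target for the first sample is 0
w0 += err             # -> 0.0
bias += err           # -> -0.21010613206587636
print(w0, bias)

That matches the first line after the initial weights, so the list clearly is mutated during training. And still the final returned values coincide, up to floating-point noise, with the initial random weights, with the bias back at 0.0.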