TypeError: ufunc 'add' output (typecode 'O') could not be coerced to provided output parameter (typecode 'd') according to the casting rule ''same_kind''
I am trying to implement a federated learning scheme on my own, but I get the following warning and error:
VisibleDeprecationWarning: Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray.
error = training_outputs - output
Traceback (most recent call last):
File "C:\Users\Code\main.py", line 176, in <module>
initializer.training_update(initializer.first_model_dict.get('final_weights'), local_data_list, local_output_list, 10000)
File "C:\Users\Code\main.py", line 68, in training_update
model = percep.train(local_data[i], local_output[i], training_iterations)
File "C:\Users\Code\main.py", line 42, in train
self.weights += adjustments
TypeError: ufunc 'add' output (typecode 'O') could not be coerced to provided output parameter (typecode 'd') according to the casting rule ''same_kind''
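To better understand the message, I also tried a minimal snippet outside of my project (the variable names here are made up, this is not my real code). An in-place add where the right-hand side is an object-dtype ('O') array gives me the same kind of TypeError (the exact wording depends on the NumPy version):

import numpy as np

weights = np.zeros(3)               # float64 array, typecode 'd'
ragged = np.empty(3, dtype=object)  # object array, typecode 'O'
ragged[:] = [1, 2, 3]

weights += ragged                   # in-place add cannot cast 'O' to 'd' under 'same_kind'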
Here is the code:
import numpy as np


class Perceptron():

    def __init__(self):
        # Seed the random number generator
        np.random.seed(1)

        # Set weights to a 3x1 matrix,
        # with values from -1 to 1 and mean 0
        self.weights = 2 * np.random.random((3, 1)) - 1

    def sigmoid(self, x):
        """
        Takes in weighted sum of the inputs and normalizes
        them through between 0 and 1 through a sigmoid function
        """
        return 1 / (1 + np.exp(-x))

    def sigmoid_dx(self, x):
        """
        The derivative of the sigmoid function used to
        calculate necessary weight adjustments
        """
        return x * (1 - x)

    def train(self, training_inputs, training_outputs, training_iterations):
        """
        We train the model through trial and error, adjusting the
        weights each time to get a better result
        """
        for _ in range(training_iterations):
            # Pass training set through the neural network
            output = self.results(training_inputs)

            # Calculate the error rate
            error = training_outputs - output

            # Multiply error by input and gradient of the sigmoid function
            # Less confident weights are adjusted more through the nature of the function
            adjustments = np.dot(training_inputs.T, error * self.sigmoid_dx(output))

            # Adjust weights
            self.weights += adjustments

        final_weights = self.weights
        model = {'output': output,
                 'error': error,
                 'adjustments': adjustments,
                 'weights': self.weights,
                 'final_weights': final_weights
                 }
        return model

    def results(self, inputs):
        """
        Pass inputs through the neural network to get output
        """
        inputs = inputs.astype(float)
        output = self.sigmoid(np.dot(inputs, self.weights))
        return output

    def training_update(self, weights, local_data, local_output, training_iterations):
        self.weights = weights
        percep = Perceptron()
        trained_model_list = []
        for i in range(len(local_data)):
            model = percep.train(local_data[i], local_output[i], training_iterations)
            trained_model_list.append(model)
        return trained_model_list

    def get_model_dict(self, model_dict, key):
        return model_dict.get(key)


class Server(Perceptron):

    def __init__(self):
        pass

    # Train the first global model
    def first_model(self):
        perceptron = Perceptron()
        training_inputs = np.array([[0, 0, 1],
                                    [1, 1, 1],
                                    [1, 0, 1],
                                    [0, 1, 1]])
        training_outputs = np.array([[0, 1, 1, 0]]).T
        first_model_dict = perceptron.train(training_inputs, training_outputs, 10000)
        return first_model_dict

    def aggregation(self, modelList):
        weights = []
        sum = 0
        for i in modelList:
            weights.append(modelList[i].get(final_weights))
        for i in weights:
            sum += i
        res = sum / len(modelList)
        return res


class Device(Server):

    def __init__(self, id, first_model_dict):
        self.id = id
        self.first_model_dict = first_model_dict

    def initialization(self):
        local_data_list = []
        local_output_list = []
        device_list = []
        np.random.seed(1)
        print("Enter the number of devices: ")
        n = int(input())
        print("Configuring local data...: ")
        for i in range(n):
            print("Enter lower number (0 by default): ")
            a = int(input())
            print("Enter limit number (2 by default): ")
            b = int(input())
            print("Enter number of rows of matrix (1000 by default): ")
            c = int(input())
            print("Enter number of columns of matrix (3 by default): ")
            d = int(input())
            local_data = np.random.randint(a, b, (c, d))
            local_output = []
            server = Server()
            devices = Device(i, server.first_model())
            if local_data[i][0] == 0:
                res = 0
                local_output.append(res)
                local_output.append(np.random.randint(a, b, (c - 1, 1)))
            else:
                res = 1
                local_output.append(res)
                local_output.append(np.random.randint(a, b, (c - 1, 1)))
            local_data_list.append(local_data)
            local_output_list.append(local_output)
            device_list.append(devices)
        return local_data_list, local_output_list, device_list


if __name__ == "__main__":
    server = Server()
    initializer = Device(10, server.first_model())
    local_data_list, local_output_list, device_list = initializer.initialization()
    initializer.training_update(initializer.first_model_dict.get('final_weights'), local_data_list, local_output_list, 10000)
I don't think the problem is in the operations themselves, because I tested this function before and it worked fine. Maybe the way my variables are initialized in the main block is the problem, but I am new to Python and can't find the root cause.
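If it helps, I can check what training_update receives with a small debugging snippet like this (it is not part of the program above, just an inspection I run after initialization):

import numpy as np

# Check the dtype and shape of each local output after initialization;
# a dtype of object would match the typecode 'O' in the error.
for i, out in enumerate(local_output_list):
    arr = np.asarray(out, dtype=object)
    print(i, type(out), arr.dtype, arr.shape)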