ValueError: Shapes (None, 1) and (None, 41) are incompatible

Hi, I am trying to integrate this code https://github.com/stijani/tutorial/blob/master/fl_implemetation.py with a text-classification model. The linked code works on image data, so I am having trouble adapting it to text data ... and I don't understand what to pass to the build function in place of the image shape and class count, i.e. smlp_global.build(784, 10), where the shape is 28 × 28 (784 when flattened) and there are 10 classes.

I passed [None] for the shape and 41 for the number of classes, but I got ValueError: Shapes (None, 1) and (None, 41) are incompatible.
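For reference, in the linked tutorial the call passes the flattened image size and the number of classes. The analogous call for this text pipeline would use the padded sequence length and the 41 categories; this is a minimal sketch for comparison only (not part of the script below, and not a verified fix):

# image tutorial: 28x28 images are flattened to 784 features, 10 digit classes
global_model = smlp_global.build(784, 10)

# text version: each sample is a padded sequence of max_length token ids, 41 categories
global_model = smlp_global.build(max_length, 41)

A mismatch of (None, 1) against (None, 41) usually means the labels reaching the categorical-crossentropy loss are single values per sample rather than 41-dimensional one-hot vectors, regardless of what is passed to build().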
import numpy as np
import pandas as pd
import random
import cv2
import os
from imutils import paths
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import shuffle
from sklearn.metrics import accuracy_score
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import SGD
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Dropout
#import libraries
from sklearn.feature_extraction.text import CountVectorizer
from sklearn import metrics
import string
from sklearn.feature_extraction.text import TfidfVectorizer
from tensorflow import keras
import matplotlib.pyplot as plt
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from sklearn.metrics import classification_report, confusion_matrix
from tensorflow.keras.layers import Embedding
import seaborn as sns
pd.set_option('display.max_colwidth', None)  # None = no truncation (-1 is deprecated in newer pandas)
current_dir = os.getcwd()
def create_clients(image_list, label_list, num_clients=10, initial='clients'):
    '''return: a dictionary with clients' names as keys and data shards
    (tuples of samples and labels) as values.
    args:
        image_list: a list/array of training samples
        label_list: a list of binarized labels, one per sample
        num_clients: number of federated members (clients)
        initial: the clients' name prefix, e.g. clients_1
    '''
    #create a list of client names
    client_names = ['{}_{}'.format(initial, i+1) for i in range(num_clients)]
    #randomize the data (use the function arguments, not the global X_train/y_train)
    data = list(zip(image_list, label_list))
    random.shuffle(data)
    #shard data and place at each client
    size = len(data)//num_clients
    shards = [data[i:i + size] for i in range(0, size*num_clients, size)]
    #number of clients must equal number of shards
    assert(len(shards) == len(client_names))
    return {client_names[i]: shards[i] for i in range(len(client_names))}
def batch_data(data_shard, bs=32):
    '''Takes a client's data shard and creates a tf.data.Dataset from it
    args:
        data_shard: the (sample, label) pairs constituting a client's data shard
        bs: batch size
    return:
        tf.data.Dataset object'''
    #separate the shard into data and label lists
    data, label = zip(*data_shard)
    dataset = tf.data.Dataset.from_tensor_slices((list(data), list(label)))
    return dataset.shuffle(len(label)).batch(bs)
class SimpleMLP:
    @staticmethod
    def build(shape, classes):
        # NOTE: `shape` is not used here; the Embedding layer accepts
        # variable-length sequences via input_shape=[None]
        tf.keras.backend.clear_session()
        model = keras.models.Sequential([
            Embedding(num_tokens,
                      embedding_dim,
                      embeddings_initializer=keras.initializers.Constant(embedding_matrix),
                      mask_zero=True, input_shape=[None], trainable=False),
            keras.layers.Bidirectional(keras.layers.LSTM(256, dropout=0.4)),
            # use the `classes` argument instead of a hard-coded 41
            keras.layers.Dense(classes, activation="softmax")
        ])
        return model
def weight_scalling_factor(clients_trn_data, client_name):
    client_names = list(clients_trn_data.keys())
    #get the batch size
    bs = list(clients_trn_data[client_name])[0][0].shape[0]
    #first calculate the total number of training data points across clients
    global_count = sum([tf.data.experimental.cardinality(clients_trn_data[client_name]).numpy() for client_name in client_names])*bs
    #get the total number of data points held by this client
    local_count = tf.data.experimental.cardinality(clients_trn_data[client_name]).numpy()*bs
    return local_count/global_count
def scale_model_weights(weight, scalar):
    '''function for scaling a model's weights'''
    weight_final = []
    steps = len(weight)
    for i in range(steps):
        weight_final.append(scalar * weight[i])
    return weight_final
def sum_scaled_weights(scaled_weight_list):
    '''Return the sum of the listed scaled weights. This is equivalent to a weighted average of the weights'''
    avg_grad = list()
    #get the average gradient across all client gradients
    for grad_list_tuple in zip(*scaled_weight_list):
        layer_mean = tf.math.reduce_sum(grad_list_tuple, axis=0)
        avg_grad.append(layer_mean)
    return avg_grad
def test_model(X_test, Y_test, model, comm_round):
    # the model ends in a softmax, so from_logits must be False;
    # Y_test is expected to be one-hot encoded, shape (N, num_classes)
    cce = tf.keras.losses.CategoricalCrossentropy(from_logits=False)
    #logits = model.predict(X_test, batch_size=100)
    logits = model.predict(X_test)
    loss = cce(Y_test, logits)
    acc = accuracy_score(tf.argmax(logits, axis=1), tf.argmax(Y_test, axis=1))
    print('comm_round: {} | global_acc: {:.3%} | global_loss: {}'.format(comm_round, acc, loss))
    return acc, loss
#from fl_mnist_implementation_tutorial_utils import *
# HP DATA LOAD******************************************************
data = pd.read_json('z:/auction/python/News_Category_Dataset_v2.json')
data['category'] = data['category'].map(lambda x: "WORLDPOST" if x == 'THE WORLDPOST' else x)
data['text'] = data['headline'] + " " + data['short_description']
X,Y = data['text'],data['category']
#80% to train , 10% for validation , 10% for testing
X_train, X_val, y_train, y_val = train_test_split(X,Y, test_size=0.2, random_state=42)
X_val, X_test , y_val, y_test= train_test_split(X_val,y_val, test_size=0.5, random_state=42)
vocab_size =20000
max_length = 150
trunc_type='post'
padding_type='post'
oov_tok = "<OOV>"
#NLP tokenization with Keras
tokenizer = tf.keras.preprocessing.text.Tokenizer(num_words = vocab_size, oov_token=oov_tok)
tokenizer.fit_on_texts(X_train)
word_index = tokenizer.word_index
X_train = tokenizer.texts_to_sequences(X_train)
X_train = pad_sequences(X_train,maxlen= max_length,padding=padding_type, truncating=trunc_type)
y_train = np.asarray(y_train)
y_train = pd.get_dummies(y_train)
X_val = tokenizer.texts_to_sequences(X_val)
X_val = pad_sequences(X_val,maxlen= max_length,padding=padding_type, truncating=trunc_type)
y_val = np.asarray(y_val)
y_val = pd.get_dummies(y_val)
train_set = np.array(X_train)
val_set = np.array(X_val)
train_label = np.array(y_train)
val_label = np.array(y_val)
# the test texts also need to be tokenized and padded like the train/validation sets
X_test = tokenizer.texts_to_sequences(X_test)
X_test = pad_sequences(X_test, maxlen=max_length, padding=padding_type, truncating=trunc_type)
y_test = pd.get_dummies(y_test)
y_test = np.asarray(y_test)
y_test = np.argmax(y_test,axis=1) #this would be our ground truth label while testing
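# Note on the resulting label formats: y_train and y_val are pandas DataFrames of
# one-hot columns (one column per category) produced by pd.get_dummies, whereas
# y_test has been collapsed by np.argmax into a 1-D array of integer class ids.
# The code below (create_clients, test_model) has to agree on which of these two
# formats it is given.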
print(train_set.shape)
print(train_label.shape)
print(val_set.shape)
print(val_label.shape)
#glove
path_to_glove_file = './glove.6B.100d.txt'
#Initialising the embedding matrix with glove vec embeddings
num_tokens = len(tokenizer.word_index) + 2
embedding_dim = 100
hits = 0
misses = 0
embeddings_index = {}
with open(path_to_glove_file, errors="ignore") as f:
for line in f:
word, coefs = line.split(maxsplit=1)
coefs = np.fromstring(coefs, "f", sep=" ")
embeddings_index[word] = coefs
print("Found %s word vectors." % len(embeddings_index))
# Prepare embedding matrix
embedding_matrix = np.zeros((num_tokens, embedding_dim))
for word, i in tokenizer.word_index.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
# Words not found in embedding index will be all-zeros.
# This includes the representation for "padding" and "OOV"
embedding_matrix[i] = embedding_vector
hits += 1
else:
misses += 1
print("Converted %d words (%d misses)" % (hits, misses))
#create clients (the train/validation/test split was done above)
clients = create_clients(X_train, y_train, num_clients=10, initial='client')
#process and batch the training data for each client
clients_batched = dict()
for (client_name, data) in clients.items():
clients_batched[client_name] = batch_data(data)
#process and batch the test set
test_batched = tf.data.Dataset.from_tensor_slices((X_test, y_test)).batch(len(y_test))
comms_round = 10
#create optimizer and compile settings
lr = 0.001
loss = 'categorical_crossentropy'
metrics = ['accuracy']  # the original `metrics = metrics` pointed at the sklearn.metrics module
optimizer = keras.optimizers.Adam(learning_rate=lr,
                                  decay=lr / comms_round)
#initialize global model
smlp_global = SimpleMLP()
global_model = smlp_global.build([None], 41)
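# NOTE: as written, build() ignores `shape` (the Embedding layer takes variable-length
# sequences via input_shape=[None]); `classes` only sets the size of the softmax output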
#commence global training loop
for comm_round in range(comms_round):
# get the global model's weights - will serve as the initial weights for all local models
global_weights = global_model.get_weights()
#initial list to collect local model weights after scaling
scaled_local_weight_list = list()
#randomize client data - using keys
client_names= list(clients_batched.keys())
random.shuffle(client_names)
#loop through each client and create new local model
for client in client_names:
smlp_local = SimpleMLP()
local_model = smlp_local.build([None], 41)
local_model.compile(loss=loss,
optimizer=optimizer,
metrics=metrics)
#set local model weight to the weight of the global model
local_model.set_weights(global_weights)
#fit local model with client's data
local_model.fit(clients_batched[client], epochs=1, verbose=0)
#scale the model weights and add to list
scaling_factor = weight_scalling_factor(clients_batched, client)
scaled_weights = scale_model_weights(local_model.get_weights(), scaling_factor)
scaled_local_weight_list.append(scaled_weights)
#clear session to free memory after each communication round
K.clear_session()
#to get the average over all the local models, we simply take the sum of the scaled weights
average_weights = sum_scaled_weights(scaled_local_weight_list)
#update global model
global_model.set_weights(average_weights)
#test global model and print out metrics after each communication round
for(X_test, Y_test) in test_batched:
global_acc, global_loss = test_model(X_test, Y_test, global_model, comm_round)