python - Keras Custom Layer - AttributeError: 'Tensor' object has no attribute '_keras_history'


So, big picture: I'm trying to make a Keras word2vec auto-encoder. I tried to follow the CustomVariationalLayer class from this official example.

My class is this:

class CustomAELayer(Layer):
    """Custom Keras layer to handle looking up wv inputs.
    Example: https://github.com/fchollet/keras/blob/master/examples/variational_autoencoder.py
    """
    def __init__(self, **kwargs):
        self.is_placeholder = True
        super(CustomAELayer, self).__init__(**kwargs)

    def ae_loss(self, reconstruction, emb_lookup):
        loss = K.sum(emb_lookup - reconstruction, axis=-1)
        return K.mean(loss)

    def call(self, inputs):
        reconstruction = inputs[1]
        emb_lookup = inputs[0]
        loss = self.ae_loss(emb_lookup, reconstruction)
        self.add_loss(loss)
        return emb_lookup
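For context, the way this kind of loss-only layer is meant to be used (both in the official example and further down in my code) is roughly the following; this is a minimal sketch with illustrative names and shapes, compiling with loss=None so the only loss is the one registered via add_loss:

from keras import backend as K
from keras.layers import Input, Dense
from keras.models import Model

# Minimal wiring sketch (illustrative shapes): call the layer on a list of
# tensors; it returns one of them and registers its loss via add_loss.
inp = Input(shape=(8,))
recon = Dense(8, activation="tanh")(inp)
out = CustomAELayer()([inp, recon])

model = Model(inp, out)
model.compile(optimizer="adamax", loss=None)  # loss=None: train only on add_loss losses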

This error occurs regardless of whether I return emb_lookup or reconstruction. The major difference between my layer and the official example is that I use the embedding lookup as an input, which is the output of a keras.layers.Embedding object, and the reconstruction is:

recon_layer = Dense(outshape, activation="tanh", kernel_regularizer=l2(in_args.l2_rate))(deconv_input)
s_recon_layer = K.squeeze(recon_layer, 2)



The full error message is this:

Traceback (most recent call last):
  File "semi_sup_cnn_big_data_test.py", line 166, in <module>
    main()
  File "semi_sup_cnn_big_data_test.py", line 84, in main
    args,run_time,micro,macro = basic_cnn_train_val_test(args)
  File "semi_sup_cnn_big_data_test.py", line 100, in basic_cnn_train_val_test
    clf,args = init_export_network(args)
  File "/home/qqi/git/mpi_cnn/models/auto_encoder_multilayer_cnn.py", line 257, in init_export_network
    model = Model(model_input, y)
  File "/usr/local/lib/python3.5/dist-packages/keras/legacy/interfaces.py", line 88, in wrapper
    return func(*args, **kwargs)
  File "/usr/local/lib/python3.5/dist-packages/keras/engine/topology.py", line 1705, in __init__
    build_map_of_graph(x, finished_nodes, nodes_in_progress)
  File "/usr/local/lib/python3.5/dist-packages/keras/engine/topology.py", line 1695, in build_map_of_graph
    layer, node_index, tensor_index)
  File "/usr/local/lib/python3.5/dist-packages/keras/engine/topology.py", line 1665, in build_map_of_graph
    layer, node_index, tensor_index = tensor._keras_history
AttributeError: 'Tensor' object has no attribute '_keras_history'
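As far as I can tell from the Keras 2 source, Model.__init__ walks the graph backwards through the _keras_history attribute that every layer output carries, and tensors produced by raw backend calls such as K.squeeze or K.expand_dims never receive that attribute, which is exactly what build_map_of_graph trips over. A quick probe (illustrative shapes) shows the difference:

from keras import backend as K
from keras.layers import Input

x = Input(shape=(5, 1))
print(hasattr(x, '_keras_history'))   # True: produced by a Keras layer
y = K.squeeze(x, 2)                   # raw backend op on the same tensor
print(hasattr(y, '_keras_history'))   # False: no Keras graph metadata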

As requested, here is the full init_export_network function:

def init_export_network(in_args):
    import_dir = os.path.join('cv_data',
                              in_args.data_name,
                              in_args.label_name,
                              in_args.this_fold)

    # set output dir models/[model_name]/[data_name]/[label_file_name]/[this_fold]
    output_dir = os.path.join("initialized_models",
                              in_args.model_name,
                              in_args.data_name,
                              in_args.label_name,
                              in_args.this_fold)
    print("exporting to", output_dir)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    else:
        print(output_dir, "data dir identified and re-populated")
        shutil.rmtree(output_dir)
        os.makedirs(output_dir)
    "returns base CNN architecture and placeholder/untrained weights"
    # unpickle wv_matrix, class_names
    wv_matrix = unpckl(os.path.join(import_dir, 'wv_matrix.pickle'))
    print("valid pre-processed data found in", import_dir)
    # define network layers ----------------------------------------------------
    input_shape = (in_args.seq_len,)
    output_shape = (in_args.seq_len, len(wv_matrix[0]),)
    emb_size = len(wv_matrix[0])
    model_input = Input(shape=input_shape)
    emb_lookup = Embedding(len(wv_matrix),
                           len(wv_matrix[0]),
                           embeddings_regularizer=l2(in_args.emb_l2_rate),
                           input_length=in_args.seq_len, name="embedding")(model_input)
    if in_args.emb_dropout:
        emb_lookup = Dropout(in_args.emb_dropout)(emb_lookup)
    conv_blocks = []
    # conv blocks --------------------------------------------------------------
    print("emb_lookup shape", emb_lookup.shape)
    for ith_conv, sz in enumerate(in_args.filter_sizes):
        if ith_conv == 0:
            conv_input = emb_lookup
        else:
            conv_input = conv
        conv = Convolution1D(filters=in_args.feat_maps[ith_conv],
                             kernel_size=sz,
                             padding="valid",
                             activation="relu",
                             kernel_initializer='lecun_uniform',
                             kernel_regularizer=l2(in_args.l2_rate),
                             strides=1,
                             name="{}_conv".format(ith_conv))(conv_input)
        print("{}_conv".format(ith_conv), conv.shape)
    # deconv blocks: dimensions are the reverse of multilayer_cnn ---------------
    deconv_blocks = []
    deconv_filter_sizes = in_args.filter_sizes
    deconv_filter_sizes.reverse()
    conv_input = conv
    print("conv_upsampling shape", conv_input.shape)
    deconv_input = K.expand_dims(conv_input, 2)
    print("conv_reshape shape", conv_input)
    for ith_conv, sz in enumerate(deconv_filter_sizes):
        print("{}_deconv input shape".format(ith_conv), deconv_input)
        deconv = Conv2DTranspose(filters=in_args.feat_maps[ith_conv],
                                 kernel_size=(sz, 1),
                                 padding="valid",
                                 activation="relu",
                                 kernel_initializer='lecun_uniform',
                                 kernel_regularizer=l2(in_args.l2_rate),
                                 strides=(1, 1),
                                 name="{}_deconv".format(ith_conv))(deconv_input)
        deconv_input = deconv
    print("{}_deconv input shape".format(ith_conv), deconv_input)
    print("deconv_output shape", deconv)
    outshape = len(wv_matrix[0])
    recon_layer = Dense(outshape, activation="tanh",
                        kernel_regularizer=l2(in_args.l2_rate))(deconv_input)
    print("recon_layer shape", recon_layer)
    #s_recon_layer = K.squeeze(recon_layer, 2)
    s_recon_layer = Lambda(lambda x: K.squeeze(x, 2))(recon_layer)
    print("squeezed recon_layer shape", s_recon_layer)
    # end define network layers ------------------------------------------------
    y = CustomAELayer()([model_input, emb_lookup, s_recon_layer])
    model = Model(model_input, y)
    # finished network layers definition - compile network
    opt = optimizers.Adamax()
    model.compile(loss=None, optimizer='adamax')
    # load wv_matrix into the embedding layer
    embedding_layer = model.get_layer("embedding")
    embedding_layer.set_weights([wv_matrix])
    print("initializing embedding layer with word2vec weights, shape", wv_matrix.shape)
    # save model architecture as json
    open(os.path.join(output_dir, "structure.json"), "w").write(model.to_json())
    # save initialized model weights as .hdf5
    model.save_weights(os.path.join(output_dir, "weights" + ".hdf5"))
    print("multilayer network/initial weights saved in", output_dir)
    print(in_args)
    return model, in_args

The error message looks pretty similar to this question: https://stackoverflow.com/a/45309816/1531463

In short, I think I need to wrap this line:

s_recon_layer = K.squeeze(recon_layer, 2)

(or any other backend function calls) in a Lambda layer.

Specifically:

s_recon_layer = Lambda(lambda x: K.squeeze(x, 2))(recon_layer)
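To make the failure and the fix concrete, here is a minimal, self-contained sketch (illustrative shapes; assumes Keras 2.x on the TensorFlow backend):

from keras import backend as K
from keras.layers import Input, Dense, Lambda
from keras.models import Model

inp = Input(shape=(5, 1, 4))
dense = Dense(4, activation="tanh")(inp)           # shape (None, 5, 1, 4)

# Fails: K.squeeze returns a raw backend tensor without _keras_history,
# so Model() cannot trace the graph back through it:
#   bad = K.squeeze(dense, 2)
#   model = Model(inp, bad)   # AttributeError: ... '_keras_history'

# Works: Lambda wraps the backend op in a proper Keras layer.
good = Lambda(lambda x: K.squeeze(x, 2))(dense)    # shape (None, 5, 4)
model = Model(inp, good)

Note that the same reasoning applies to deconv_input = K.expand_dims(conv_input, 2) in the function above: it should likewise become Lambda(lambda x: K.expand_dims(x, 2))(conv_input), otherwise the graph traversal will still hit a history-less tensor further up.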
