Home>

I used the softmax function in my neural network, but it raises an error when the tensor data types do not match, and I would like to fix it.
The input x is a 1x64 tensor and z (the label) is a 1x1 tensor. Is the error caused by the tensor's data type? I would appreciate it if anyone could explain.

Error statement
output = spikes ['E'] .sum (-1) .softmax (0)
RuntimeError: "softmax" not implemented for'Long'
## Applicable part
    output = spikes ['E'] .sum (-1) .softmax (0)
    predicted = output.argmax (1) .item ()
code
```python
import torch
from bindsnet.network import Network
from bindsnet.network.nodes import Input, LIFNodes
from bindsnet.network.topology import Connection
from bindsnet.network.monitors import Monitor
import numpy as np

# Simulation length (timesteps) per training sample.
time = 100

# ---------------------------------------------------------------------------
# Build the network: one 1x64 input layer ('A') feeding four LIF layers
# ('B' -> 'C' -> 'D' -> 'E'), with a recurrent inhibitory connection on 'E'.
# ---------------------------------------------------------------------------
network = Network()
inpt = Input(n=64, shape=[1, 64], sum_input=True)
middle = LIFNodes(n=100, trace=True, sum_input=True)
center = LIFNodes(n=100, trace=True, sum_input=True)
final = LIFNodes(n=100, trace=True, sum_input=True)
out = LIFNodes(n=100, sum_input=True)

network.add_layer(inpt, name='A')
network.add_layer(middle, name='B')
network.add_layer(center, name='C')
network.add_layer(final, name='D')
network.add_layer(out, name='E')

# Feed-forward connections between consecutive layers.
# BUG FIX: the original assigned each connection to `foward_connection`
# (typo) but passed `forward_connection` to add_connection -> NameError.
# The earlier wmin/wmax Connection objects were never added to the network
# and have been removed as dead code.
forward_connection = Connection(source=inpt, target=middle,
                                w=0.05 + 0.1 * torch.randn(inpt.n, middle.n))
network.add_connection(connection=forward_connection, source="A", target="B")
forward_connection = Connection(source=middle, target=center,
                                w=0.05 + 0.1 * torch.randn(middle.n, center.n))
network.add_connection(connection=forward_connection, source="B", target="C")
forward_connection = Connection(source=center, target=final,
                                w=0.05 + 0.1 * torch.randn(center.n, final.n))
network.add_connection(connection=forward_connection, source="C", target="D")
forward_connection = Connection(source=final, target=out,
                                w=0.05 + 0.1 * torch.randn(final.n, out.n))
network.add_connection(connection=forward_connection, source="D", target="E")
# Recurrent (lateral inhibition) connection on the output layer.
recurrent_connection = Connection(source=out, target=out,
                                  w=0.025 * (torch.eye(out.n) - 1))
network.add_connection(connection=recurrent_connection, source="E", target="E")

# Monitors recording spikes ('s') and voltages ('v') for each layer.
# BUG FIX: the original passed obj=inpt for ALL five monitors (copy-paste),
# so every monitor recorded the input layer only.
inpt_monitor = Monitor(obj=inpt, state_vars=("s", "v"), time=500)
middle_monitor = Monitor(obj=middle, state_vars=("s", "v"), time=500)
center_monitor = Monitor(obj=center, state_vars=("s", "v"), time=500)
final_monitor = Monitor(obj=final, state_vars=("s", "v"), time=500)
out_monitor = Monitor(obj=out, state_vars=("s", "v"), time=500)

network.add_monitor(monitor=inpt_monitor, name="A")
network.add_monitor(monitor=middle_monitor, name="B")
network.add_monitor(monitor=center_monitor, name="C")
network.add_monitor(monitor=final_monitor, name="D")
network.add_monitor(monitor=out_monitor, name="E")

# Spike-only monitors sized to the simulation length; these overwrite the
# names 'A'..'E' above and are the ones read in the training loop below.
for l in network.layers:
    m = Monitor(network.layers[l], state_vars=['s'], time=time)
    network.add_monitor(m, name=l)

# ---------------------------------------------------------------------------
# Load training data.
# ---------------------------------------------------------------------------
npzfile = np.load("C:/Users/namae/Desktop/myo-python-1.0.4/myo-armband-nn-master/data/train_set.npz")
x = npzfile['x']
# BUG FIX: the original fused this assignment into a comment on the previous
# line, so `y` was never defined and np.where(y == 1.0) raised NameError.
y = npzfile['y']
# Cast inputs to float once; integer inputs would later break softmax too.
x = torch.from_numpy(x).float()
# BUG FIX: the original extracted label indices via a fragile
# enumerate-over-np.where loop that kept only the last sample's label.
# `y` is one-hot, so argmax along axis 1 gives one class index per sample.
labels = torch.from_numpy(np.argmax(y, axis=1)).long()

grads = {}
lr, lr_decay = 1e-2, 0.95
criterion = torch.nn.CrossEntropyLoss()  # cross-entropy loss (currently unused)

# ---------------------------------------------------------------------------
# Training loop: one (input, label) pair per iteration.
# BUG FIX: the original looped `for i in zip(x, z)`, making `i` a tuple so
# `i > 0 and i % 500 == 0` raised; it also never used the per-sample data.
# ---------------------------------------------------------------------------
for i, (x_i, z_i) in enumerate(zip(x, labels)):
    # Repeat the 1x64 sample for every timestep; 'E_b' is a constant bias input.
    inputs = {'A': x_i.repeat(time, 1), 'E_b': torch.ones(time, 1)}
    network.run(inputs=inputs, time=time)
    # Collect spikes ('s') from all layers.
    spikes = {l: network.monitors[l].get('s') for l in network.layers}
    # Collect the summed inputs of all layers (available via sum_input=True).
    summed_inputs = {l: network.layers[l].summed for l in network.layers}
    # BUG FIX (the reported RuntimeError): spike counts are integer tensors
    # and softmax is not implemented for Long, so cast to float first.
    output = spikes['E'].sum(-1).float().softmax(0)
    predicted = output.argmax(1).item()
    # Manual SGD update on the softmax gradient.
    grads['dl/df'] = summed_inputs['E'].softmax(0)
    grads['dl/df'][z_i] -= 1  # subtract 1 at the true-label index
    grads['dl/dw'] = torch.ger(summed_inputs['A'], grads['dl/df'])
    # NOTE(review): the original updated connections['A','E'], but no direct
    # A->E connection exists in this network; ('D','E') is the connection
    # into the output layer — confirm this matches the intended update.
    network.connections[('D', 'E')].w -= lr * grads['dl/dw']
    # Decay the learning rate every 500 samples (the original had this
    # commented out, which made the `if` a no-op).
    if i > 0 and i % 500 == 0:
        lr *= lr_decay
    network.reset_()  # clear state variables before the next sample
  • Answer # 1

    Good evening. It's a little difficult to answer because I wasn't able to execute the code you posted, but the error seems to mean that a LongTensor cannot be used as input to softmax().

    How about forcing the output of spikes['E'].sum(-1) to a FloatTensor by adding .float()?

    # Added .float ()
    output = spikes ['E'] .sum (-1) .float (). softmax (0)

    I have confirmed the operation by executing the following in Google Colab.

    import torch

    # Dummy integer tensor reproducing the dtype from the question.
    output = torch.randint (-5, 5, (3, 10)) # dummy
    print ("type:", output.dtype) #->type: torch.int64
    # Calling output.sum(-1).softmax(0) directly raises:
    #   RuntimeError: "softmax_lastdim_kernel_impl" not implemented for'Long'
    # Casting to float first avoids the error:
    output.sum (-1) .float (). softmax (0)

    that's all.