This is another TNeuralNetwork example for Harbour that I have ported from Python code:
https://github.com/FiveTechSoft/TNeuralNetwork
Original Python work by Matt Mazur.

#define EPOCHS        100   // number of training passes over the data
#define LEARNING_RATE 0.1   // gradient descent step size

function Main()
   LOCAL input_data  := { 0.5, 0.3, 0.2, 0.7, 0.9, 0.1, 0.4, 0.6, 0.8, 0.2, 0.5, 0.7 }   // 12 input values
   LOCAL true_output := { 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7 }                            // 7 expected outputs
   train_neural_network( input_data, true_output )
return nil

FUNCTION sigmoid( x )
RETURN 1 / ( 1 + Exp( -x ) )

FUNCTION sigmoid_derivative( x )
RETURN x * ( 1 - x )
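
// sigmoid_derivative() expects a value that has already been through sigmoid():
// since s = sigmoid( x ), the slope is ds/dx = s * ( 1 - s ). A quick sanity
// check (hypothetical helper, not part of the original port):
FUNCTION TestSigmoid()
   ? sigmoid( 0 )                         // --> 0.5
   ? sigmoid_derivative( sigmoid( 0 ) )   // --> 0.25, the slope of sigmoid at x == 0
RETURN nil
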
PROCEDURE initialize_parameters( weights_input_hidden, weights_hidden_output, bias_hidden, bias_output )
   LOCAL i, j
   // random weights in [0, 1) break the symmetry between neurons;
   // biases can safely start at zero
   FOR i := 1 TO 12
      FOR j := 1 TO 10
         weights_input_hidden[ i, j ] := hb_Random()
      NEXT
   NEXT
   FOR i := 1 TO 10
      FOR j := 1 TO 7
         weights_hidden_output[ i, j ] := hb_Random()
      NEXT
      bias_hidden[ i ] := 0.0
   NEXT
   FOR i := 1 TO 7
      bias_output[ i ] := 0.0
   NEXT
RETURN

PROCEDURE forward_propagation( inputs, weights_input_hidden, weights_hidden_output, bias_hidden, bias_output, hidden_output, predicted_output )
   LOCAL i, j
   LOCAL hidden_input[ 10 ]
   // input layer -> hidden layer
   FOR i := 1 TO 10
      hidden_input[ i ] := 0.0
      FOR j := 1 TO 12
         hidden_input[ i ] := hidden_input[ i ] + inputs[ j ] * weights_input_hidden[ j, i ]
      NEXT
      hidden_input[ i ] := hidden_input[ i ] + bias_hidden[ i ]
      hidden_output[ i ] := sigmoid( hidden_input[ i ] )
   NEXT
   // hidden layer -> output layer
   FOR i := 1 TO 7
      predicted_output[ i ] := 0.0
      FOR j := 1 TO 10
         predicted_output[ i ] := predicted_output[ i ] + hidden_output[ j ] * weights_hidden_output[ j, i ]
      NEXT
      predicted_output[ i ] := predicted_output[ i ] + bias_output[ i ]
      predicted_output[ i ] := sigmoid( predicted_output[ i ] )
   NEXT
RETURN
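
// In matrix form, the two loops above compute, for this 12-10-7 network:
//    hidden_output[ j ]    = sigmoid( Sum_i( inputs[ i ] * Wih[ i, j ] ) + bias_hidden[ j ] ),    j = 1 .. 10
//    predicted_output[ k ] = sigmoid( Sum_j( hidden_output[ j ] * Who[ j, k ] ) + bias_output[ k ] ),  k = 1 .. 7
// ( Wih / Who are shorthand for weights_input_hidden / weights_hidden_output )
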
FUNCTION calculate_loss( predicted_output, true_output )   // FUNCTION, not PROCEDURE: it returns a value
   LOCAL i
   LOCAL loss := 0.0
   FOR i := 1 TO 7
      loss := loss + 0.5 * ( ( predicted_output[ i ] - true_output[ i ] ) ^ 2 )
   NEXT
RETURN loss
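
// The 0.5 factor in the loss is conventional: it cancels the exponent when
// differentiating, so d(loss)/d(predicted_output[ i ]) is simply
// predicted_output[ i ] - true_output[ i ]. For example, predicting 0.8
// against a target of 0.5 contributes 0.5 * 0.3 ^ 2 = 0.045 to the loss.
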
PROCEDURE backward_propagation( inputs, true_output, hidden_output, predicted_output, weights_hidden_output, weights_input_hidden, bias_hidden, bias_output )
   LOCAL i, j
   LOCAL output_error[ 7 ], output_delta[ 7 ]
   LOCAL hidden_error, hidden_delta
   // output layer deltas
   FOR i := 1 TO 7
      output_error[ i ] := true_output[ i ] - predicted_output[ i ]
      output_delta[ i ] := output_error[ i ] * sigmoid_derivative( predicted_output[ i ] )
   NEXT
   // hidden layer deltas and weight updates
   FOR i := 1 TO 10
      hidden_error := 0.0
      FOR j := 1 TO 7
         hidden_error := hidden_error + output_delta[ j ] * weights_hidden_output[ i, j ]
      NEXT
      hidden_delta := hidden_error * sigmoid_derivative( hidden_output[ i ] )
      FOR j := 1 TO 12
         weights_input_hidden[ j, i ] := weights_input_hidden[ j, i ] + inputs[ j ] * hidden_delta * LEARNING_RATE
      NEXT
      bias_hidden[ i ] := bias_hidden[ i ] + hidden_delta * LEARNING_RATE
      FOR j := 1 TO 7
         weights_hidden_output[ i, j ] := weights_hidden_output[ i, j ] + hidden_output[ i ] * output_delta[ j ] * LEARNING_RATE
      NEXT
   NEXT
   // output biases need their own 1..7 loop: output_delta and bias_output
   // only have 7 elements, so they must not be indexed inside the 1..10 loop
   FOR i := 1 TO 7
      bias_output[ i ] := bias_output[ i ] + output_delta[ i ] * LEARNING_RATE
   NEXT
RETURN
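
// Each update above is plain gradient descent: a delta is the error scaled by
// the sigmoid slope at that neuron, and every weight moves by
// ( its input ) * delta * LEARNING_RATE. A smaller LEARNING_RATE learns more
// slowly but more stably; a larger one can overshoot and diverge.
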
PROCEDURE train_neural_network( inputs, true_output )
   LOCAL epoch, loss
   LOCAL weights_input_hidden[ 12, 10 ], weights_hidden_output[ 10, 7 ]
   LOCAL bias_hidden[ 10 ], bias_output[ 7 ]
   LOCAL hidden_output[ 10 ], predicted_output[ 7 ]
   initialize_parameters( weights_input_hidden, weights_hidden_output, bias_hidden, bias_output )
   FOR epoch := 1 TO EPOCHS
      hidden_output    := Array( 10 )   // fresh work arrays each epoch
      predicted_output := Array( 7 )
      forward_propagation( inputs, weights_input_hidden, weights_hidden_output, bias_hidden, bias_output, @hidden_output, @predicted_output )
      loss := calculate_loss( predicted_output, true_output )
      backward_propagation( inputs, true_output, hidden_output, predicted_output, weights_hidden_output, weights_input_hidden, bias_hidden, bias_output )
      IF Mod( epoch, 10 ) == 0   // report progress every 10 epochs
         ? "Epoch", epoch, "Loss:", loss
      ENDIF
   NEXT
   ? "Training complete!"
RETURN
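
Once trained, the same forward_propagation() can be reused for inference on new inputs. Here is a minimal sketch of a predict() helper (my addition, not part of the original port; it assumes you also change train_neural_network() to hand back the four trained parameter arrays, e.g. as { weights_input_hidden, weights_hidden_output, bias_hidden, bias_output }):

FUNCTION predict( inputs, weights_input_hidden, weights_hidden_output, bias_hidden, bias_output )
   LOCAL hidden_output[ 10 ], predicted_output[ 7 ]
   forward_propagation( inputs, weights_input_hidden, weights_hidden_output, ;
                        bias_hidden, bias_output, @hidden_output, @predicted_output )
RETURN predicted_output
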
Antonio Linares wrote: Dear Juan,
You have to think about which "inputs" you are going to feed the neural network and which "outputs" you expect back.
For example, imagine you feed it 12 different values at a time (on each training session) and expect 7 different values in return.
In that case the neural network would use 12 input perceptrons and 7 output perceptrons. Usually at least one intermediate layer is placed between the input layer and the output layer; that intermediate layer could use, for example, 10 perceptrons.
As it trains, the neural network adjusts itself closer and closer to the expected values, thus reducing the error it makes and improving its accuracy.
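
The 12-10-7 topology Antonio describes is exactly what the listing above hard-codes in its loop bounds and array dimensions. One way to make that explicit (these constant names are my own, not in the original code) would be:

#define INPUT_NEURONS   12   // values fed to the net per training session
#define HIDDEN_NEURONS  10   // perceptrons in the single intermediate layer
#define OUTPUT_NEURONS   7   // expected return values

with every literal 12, 10 and 7 in the loops and LOCAL array declarations replaced by the matching constant.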