Problem: In this project you will implement the backpropagation algorithm for learning the weights of a neural network.
Download the code template here. You should write your code between the comments marked "Begin/End your code". You may add extra functions as you like.
You will implement four functions. calculate() applies the neural network to a given input and returns the network's output (i.e. the output of the last neuron). squared_error() computes the error of the network on a set of examples. backpropagate_example() applies the backpropagation algorithm to update the weights for a single example. learn() repeatedly applies backpropagation to each of the input examples, repeating num_epochs times (so for 4 examples and 100 epochs, learn() calls backpropagate_example() 400 times).
You will use your code to learn a network that implements the XOR function. The code defines a framework you can use to test your code. It defines four examples, with the attributes and labels in example_attributes and example_labels respectively. You should generate a random initial ANN using random_ann() and train it using ANN.learn(), using a learning rate of 10 and 10,000 epochs. Train for 10 separate initializations and choose the one that achieved the best squared error. Hard-code your best trained model in your solution file.
Code (Python):
import random
import math
import time
import copy
#####################################################
#####################################################
# Please enter the number of hours you spent on this
# project here
num_hours_i_spent_on_this_project = 1.5
#####################################################
#####################################################
def logistic(x):
return 1.0 / (1.0 + math.exp(-x))
def logistic_derivative(x):
return logistic(x) * (1-logistic(x))
def inv_logistic(y):
return -1 * math.log(1. / y - 1)
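# Note (an optional addition, not part of the template): for y = logistic(x),
# the derivative can be computed directly from the output as y * (1 - y),
# which is exactly what logistic_derivative(inv_logistic(y)) reduces to.
# A direct helper avoids the log/exp round-trip:
def logistic_derivative_from_output(y):
    return y * (1.0 - y)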
class Neuron:
def __init__(self, attribute_weights, neuron_weights, bias_weight):
# neuron.attribute_weights[i] = Weight of input attribute i as input to this neuron
self.attribute_weights = attribute_weights
        # neuron.neuron_weights[j] = Weight of neuron j as input to this neuron
self.neuron_weights = neuron_weights
self.bias_weight = bias_weight
def __repr__(self):
s = 'attribute_weights: {}\n'.format(self.attribute_weights)
s += 'neuron_weights: {}\n'.format(self.neuron_weights)
s += 'bias_weight: {}\n'.format(self.bias_weight)
return s
class ANN:
def __init__(self, num_attributes, neurons):
# Number of input attributes.
self.num_attributes = num_attributes
# Number of neurons. neurons[-1] is the output neuron.
self.neurons = neurons
self.num_hidden = len(neurons) - 1
for neuron_index, neuron in enumerate(self.neurons):
for input_neuron, input_weight in neuron.neuron_weights.items():
                assert(input_neuron < neuron_index)

    def __repr__(self):
        s = 'ANN:\n'
        s += 'input -> hidden\n'
for j in range(self.num_hidden):
s += self.neurons[j].__repr__()
s += 'hidden -> output\n'
s += self.neurons[-1].__repr__()
return s
# Calculates the output of the output neuron for given input attributes.
def calculate(self, attributes):
###########################################
# Start your code
num_hidden = self.num_hidden
num_attr = len(attributes)
hiddens = []
for i in range(0, num_hidden):
            neuron = self.neurons[i]  # i-th hidden neuron: Neuron(attribute_weights, {}, bias_weight)
            attribute_weights = neuron.attribute_weights  # attribute_weights[j]: weight from input attribute j into this neuron
bias_weight = neuron.bias_weight
hidden = 0
for j in range(0, num_attr):
hidden += attributes[j] * attribute_weights[j]
hidden += bias_weight
# non-linear
hidden = logistic(hidden)
hiddens.append(hidden)
# output layer
out = 0
out_neuron = self.neurons[-1]
out_weights = out_neuron.neuron_weights
out_bias = out_neuron.bias_weight
for i in range(num_hidden):
out += hiddens[i] * out_weights[i]
out += out_bias
out = logistic(out)
        return out, hiddens  # also return the hidden activations so backpropagate_example can reuse them
# End your code
###########################################
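    # Sanity check for the forward pass (a hypothetical example, not part of
    # the template): with every weight and bias set to zero, each hidden
    # activation is logistic(0) = 0.5, and since the output pre-activation is
    # then 0.5*0 + 0.5*0 + 0 = 0, the output is also 0.5 for any input:
    #   zero_ann = ANN(2, [Neuron({0: 0.0, 1: 0.0}, {}, 0.0),
    #                      Neuron({0: 0.0, 1: 0.0}, {}, 0.0),
    #                      Neuron({}, {0: 0.0, 1: 0.0}, 0.0)])
    #   zero_ann.calculate([1, 1])  # -> (0.5, [0.5, 0.5])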
# Returns the squared error of a collection of examples:
# Error = 0.5 * sum_i ( example_labels[i] - ann.calculate(example_attributes[i]) )**2
def squared_error(self, example_attributes, example_labels):
###########################################
# Start your code
error = 0
for attr, label in zip(example_attributes, example_labels):
out, _ = self.calculate(attr)
error += (label - out) ** 2
error = 0.5 * error
return error
# End your code
###########################################
# Runs backpropagation on a single example in order to
# update the network weights appropriately.
def backpropagate_example(self, attributes, label, learning_rate=1.0):
###########################################
# Start your code
        # helper: one gradient-descent step with the given learning rate
        def update(prev, delta):
            return prev - delta * learning_rate

        # forward pass (also returns the hidden activations)
        out, hiddens = self.calculate(attributes)
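        # Chain rule for E = 0.5 * (label - out)^2 with logistic activations
        # (a summary of the updates implemented below, using
        # sigma'(z) = sigma(z) * (1 - sigma(z))):
        #   delta_out    = dE/dz_out = (out - label) * out * (1 - out)
        #   dE/dw_j      = delta_out * hiddens[j]        (hidden j -> output weight)
        #   delta_hidden = dE/dz_j   = delta_out * w_j * hiddens[j] * (1 - hiddens[j])
        #   dE/dv_ij     = delta_hidden * attributes[i]  (input i -> hidden j weight)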
        # backward pass: delta for the output neuron
        z = inv_logistic(out)  # recover the pre-activation; logistic_derivative(z) equals out * (1 - out)
        delta_out = (out - label) * logistic_derivative(z)
        delta_out_bias = delta_out  # gradient w.r.t. the output neuron's bias
        delta_out_weight = []  # delta_out_weight[j]: gradient w.r.t. the weight from hidden j to the output
        for hidden in hiddens:
            delta_out_weight.append(delta_out * hidden)
        # deltas for the hidden layer (backpropagated through the hidden->output weights)
num_hidden = self.num_hidden
out_neuron = self.neurons[-1]
out_weights = out_neuron.neuron_weights
out_bias = out_neuron.bias_weight
        delta_hidden_weight = []  # delta_hidden_weight[j][i]: gradient w.r.t. the weight from input i to hidden j
delta_hidden_bias = []
for j in range(num_hidden):
z_j = inv_logistic(hiddens[j])
delta_hidden = delta_out * out_weights[j] * logistic_derivative(z_j)
delta_hidden_bias.append(delta_hidden)
delta_hidden_weight_j = []
for i in range(self.num_attributes):
delta_hidden_weight_j.append(delta_hidden * attributes[i])
delta_hidden_weight.append(delta_hidden_weight_j)
        # apply the updates (all gradients were computed from the pre-update weights)
for j in range(num_hidden):
self.neurons[j].bias_weight = update(self.neurons[j].bias_weight, delta_hidden_bias[j])
for i in range(self.num_attributes):
self.neurons[j].attribute_weights[i] = update(self.neurons[j].attribute_weights[i], delta_hidden_weight[j][i])
        # output neuron: update the bias once, then each hidden->output weight
        self.neurons[-1].bias_weight = update(self.neurons[-1].bias_weight, delta_out_bias)
        for j in range(num_hidden):
            self.neurons[-1].neuron_weights[j] = update(self.neurons[-1].neuron_weights[j], delta_out_weight[j])
# End your code
###########################################
# Runs backpropagation on each example, repeating this process
# num_epochs times.
def learn(self, example_attributes, example_labels, learning_rate=1.0, num_epochs=100):
###########################################
# Start your code
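        # One epoch = one pass of per-example (stochastic) gradient updates
        # over the training set, in the given order.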
for epoch in range(num_epochs):
for attr, label in zip(example_attributes, example_labels):
self.backpropagate_example(attr, label, learning_rate)
# End your code
###########################################
example_attributes = [ [0,0], [0,1], [1,0], [1,1] ]
example_labels = [0,1,1,0]
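# XOR truth table: the label is 1 exactly when the two attributes differ.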
def random_ann(num_attributes=2, num_hidden=2):
neurons = []
# hidden neurons
for i in range(num_hidden):
attribute_weights = {attribute_index: random.uniform(-1.0,1.0) for attribute_index in range(num_attributes)}
bias_weight = random.uniform(-1.0,1.0)
neurons.append(Neuron(attribute_weights,{},bias_weight))
# output neuron
neuron_weights = {input_neuron: random.uniform(-1.0,1.0) for input_neuron in range(num_hidden)}
bias_weight = random.uniform(-1.0,1.0)
neurons.append(Neuron({},neuron_weights,bias_weight))
ann = ANN(num_attributes, neurons)
return ann
best_ann = None
best_error = float("inf")
for instance_index in range(10):
ann = random_ann()
ann.learn(example_attributes, example_labels, learning_rate=10, num_epochs=10000)
error = ann.squared_error(example_attributes, example_labels)
    print('initialization {}: squared error = {}'.format(instance_index, error))
    if error < best_error:
        best_error = error
        best_ann = ann

print(best_ann)
"""
ANN:
input -> hidden
attribute_weights: {0: -7.384124055174893, 1: 6.946644212736553}
neuron_weights: {}
bias_weight: -3.6686471765196544
attribute_weights: {0: 7.259475917279755, 1: -7.481837666087849}
neuron_weights: {}
bias_weight: -3.8774855509508788
hidden -> output
attribute_weights: {}
neuron_weights: {0: 12.30015085410383, 1: 12.256716056440913}
bias_weight: -6.141038682796985
"""
#####################################################
#####################################################
# Please hard-code your learned ANN here:
learned_ann = random_ann()
learned_ann.neurons[0].attribute_weights[0] = -7.384124055174893
learned_ann.neurons[0].attribute_weights[1] = 6.946644212736553
learned_ann.neurons[0].bias_weight = -3.6686471765196544
learned_ann.neurons[1].attribute_weights[0] = 7.259475917279755
learned_ann.neurons[1].attribute_weights[1] = -7.481837666087849
learned_ann.neurons[1].bias_weight = -3.8774855509508788
learned_ann.neurons[2].neuron_weights[0] = 12.30015085410383
learned_ann.neurons[2].neuron_weights[1] = 12.256716056440913
learned_ann.neurons[2].bias_weight = -6.141038682796985
# Enter the squared error of this network here:
final_squared_error = 2.2930326434149252e-05
#####################################################
#####################################################
print(learned_ann.squared_error(example_attributes, example_labels))
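# Optional check (not part of the required output): the hard-coded network
# should map each XOR example close to its label.
for attrs, label in zip(example_attributes, example_labels):
    out, _ = learned_ann.calculate(attrs)
    print('input={} label={} prediction={:.4f}'.format(attrs, label, out))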