Multi-Layer Perceptron (MLP) Example with the Microsoft Cognitive Toolkit (CNTK)

In [1]:
# initialize the environment
import cntk as C
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
np.random.seed(123)
In [2]:
# generate and visualize the training data
X = np.concatenate([ np.random.multivariate_normal([-3, -3], [[0.1, 0], [0, 0.1]], 100),
                     np.random.multivariate_normal([ 3,  3], [[0.1, 0], [0, 0.1]], 100),
                     np.random.multivariate_normal([-3,  3], [[0.1, 0], [0, 0.1]], 100),
                     np.random.multivariate_normal([ 3, -3], [[0.1, 0], [0, 0.1]], 100) ]).astype(np.float32)
class_vector = np.concatenate([[0] * 200, [1] * 200]).reshape(400, 1)
plt.scatter(X[:,0], X[:,1], c = class_vector, marker = 'x')
plt.show()
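The plot shows four Gaussian clusters labeled in an XOR pattern: the clusters centered at (-3, -3) and (3, 3) belong to class 0, while those at (-3, 3) and (3, -3) belong to class 1, so the two classes are not linearly separable in the input space. The same sampling code is repeated inside the training loop below; a hypothetical helper along the lines of sample_minibatch (a sketch, not part of the original notebook) could factor it out:

def sample_minibatch(points_per_cluster = 100, num_classes = 2):
    # draw points around each of the four cluster centers (XOR layout)
    centers = [[-3, -3], [3, 3], [-3, 3], [3, -3]]
    cov = [[0.1, 0], [0, 0.1]]
    X = np.concatenate([ np.random.multivariate_normal(c, cov, points_per_cluster)
                         for c in centers ]).astype(np.float32)
    # the first two clusters form class 0, the last two form class 1
    class_vector = np.concatenate([[0] * 2 * points_per_cluster,
                                   [1] * 2 * points_per_cluster]).reshape(-1, 1)
    # one-hot encode the labels for the two output units
    Y = np.hstack([class_vector == k for k in range(num_classes)]).astype(np.float32)
    return X, class_vector, Y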
In [3]:
# declare the input and output variables
input_feature_count = 2
output_class_count = 2
input = C.ops.input_variable(input_feature_count)
label = C.ops.input_variable(output_class_count)
In [4]:
# declare the model
hidden_feature_count = 2
model = C.layers.Sequential([ C.layers.Dense(hidden_feature_count, init = C.layers.glorot_uniform(), activation = C.layers.sigmoid, name = "hidden_layer"),
                              C.layers.Dense(output_class_count, init = C.layers.glorot_uniform(), activation = None, name = "output_layer") ])(input)
loss = C.losses.cross_entropy_with_softmax(model, label)
eval = C.metrics.classification_error(model, label)
learning_rate = 0.5
lr_schedule = C.learners.learning_rate_schedule(learning_rate, C.UnitType.minibatch)
learner = C.learners.sgd(model.parameters, lr_schedule)
trainer = C.train.trainer.Trainer(model, (loss, eval), [learner])
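The model is a 2-2-2 MLP: a two-unit sigmoid hidden layer followed by a two-unit linear output layer, with the softmax folded into cross_entropy_with_softmax rather than into the network itself. As a rough NumPy sketch of the computation the graph describes (an illustration only, not CNTK internals; forward_and_loss is a hypothetical name):

# illustrative NumPy version of the 2-2-2 network and its loss
def forward_and_loss(x, y, W1, b1, W2, b2):
    hidden = 1.0 / (1.0 + np.exp(-(np.dot(x, W1) + b1)))    # Dense + sigmoid
    logits = np.dot(hidden, W2) + b2                        # Dense, linear output
    # cross_entropy_with_softmax: softmax over the logits, then cross entropy against the one-hot labels
    probs = np.exp(logits - logits.max(axis = 1, keepdims = True))
    probs /= probs.sum(axis = 1, keepdims = True)
    return probs, -np.mean(np.sum(y * np.log(probs), axis = 1))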
In [5]:
# train the model: each iteration samples a fresh minibatch of 400 points (100 per cluster)
for i in range(100):
    X = np.concatenate([ np.random.multivariate_normal([-3, -3], [[0.1, 0], [0, 0.1]], 100),
                         np.random.multivariate_normal([ 3,  3], [[0.1, 0], [0, 0.1]], 100),
                         np.random.multivariate_normal([-3,  3], [[0.1, 0], [0, 0.1]], 100),
                         np.random.multivariate_normal([ 3, -3], [[0.1, 0], [0, 0.1]], 100) ]).astype(np.float32)
    class_vector = np.concatenate([[0] * 200, [1] * 200]).reshape(400, 1)
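    # build one-hot labels: class_vector == class_index gives an indicator column for each class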
    class_indicators = [class_vector == class_index for class_index in range(output_class_count)]
    Y = np.asarray(np.hstack(class_indicators)).astype(np.float32)
    trainer.train_minibatch({input : X, label : Y})
    print ("Minibatch: {0}, Loss: {1:.4f}, Error: {2:.2f}".format(i, trainer.previous_minibatch_loss_average, trainer.previous_minibatch_evaluation_average))
Minibatch: 0, Loss: 0.7462, Error: 0.50
Minibatch: 1, Loss: 0.7174, Error: 0.74
Minibatch: 2, Loss: 0.7031, Error: 0.64
Minibatch: 3, Loss: 0.6998, Error: 0.56
Minibatch: 4, Loss: 0.6956, Error: 0.51
Minibatch: 5, Loss: 0.6926, Error: 0.49
Minibatch: 6, Loss: 0.6914, Error: 0.48
Minibatch: 7, Loss: 0.6888, Error: 0.47
Minibatch: 8, Loss: 0.6862, Error: 0.47
Minibatch: 9, Loss: 0.6872, Error: 0.46
Minibatch: 10, Loss: 0.6843, Error: 0.45
Minibatch: 11, Loss: 0.6845, Error: 0.44
Minibatch: 12, Loss: 0.6829, Error: 0.43
Minibatch: 13, Loss: 0.6805, Error: 0.43
Minibatch: 14, Loss: 0.6793, Error: 0.40
Minibatch: 15, Loss: 0.6797, Error: 0.44
Minibatch: 16, Loss: 0.6752, Error: 0.37
Minibatch: 17, Loss: 0.6748, Error: 0.42
Minibatch: 18, Loss: 0.6741, Error: 0.39
Minibatch: 19, Loss: 0.6712, Error: 0.38
Minibatch: 20, Loss: 0.6693, Error: 0.36
Minibatch: 21, Loss: 0.6666, Error: 0.34
Minibatch: 22, Loss: 0.6658, Error: 0.36
Minibatch: 23, Loss: 0.6620, Error: 0.30
Minibatch: 24, Loss: 0.6600, Error: 0.31
Minibatch: 25, Loss: 0.6597, Error: 0.29
Minibatch: 26, Loss: 0.6564, Error: 0.28
Minibatch: 27, Loss: 0.6520, Error: 0.26
Minibatch: 28, Loss: 0.6494, Error: 0.27
Minibatch: 29, Loss: 0.6452, Error: 0.26
Minibatch: 30, Loss: 0.6436, Error: 0.26
Minibatch: 31, Loss: 0.6410, Error: 0.25
Minibatch: 32, Loss: 0.6368, Error: 0.26
Minibatch: 33, Loss: 0.6321, Error: 0.25
Minibatch: 34, Loss: 0.6292, Error: 0.26
Minibatch: 35, Loss: 0.6240, Error: 0.25
Minibatch: 36, Loss: 0.6195, Error: 0.25
Minibatch: 37, Loss: 0.6164, Error: 0.25
Minibatch: 38, Loss: 0.6109, Error: 0.25
Minibatch: 39, Loss: 0.6061, Error: 0.25
Minibatch: 40, Loss: 0.5994, Error: 0.25
Minibatch: 41, Loss: 0.5949, Error: 0.25
Minibatch: 42, Loss: 0.5892, Error: 0.25
Minibatch: 43, Loss: 0.5824, Error: 0.25
Minibatch: 44, Loss: 0.5763, Error: 0.25
Minibatch: 45, Loss: 0.5710, Error: 0.25
Minibatch: 46, Loss: 0.5642, Error: 0.25
Minibatch: 47, Loss: 0.5579, Error: 0.04
Minibatch: 48, Loss: 0.5510, Error: 0.00
Minibatch: 49, Loss: 0.5442, Error: 0.00
Minibatch: 50, Loss: 0.5365, Error: 0.00
Minibatch: 51, Loss: 0.5299, Error: 0.00
Minibatch: 52, Loss: 0.5223, Error: 0.00
Minibatch: 53, Loss: 0.5156, Error: 0.00
Minibatch: 54, Loss: 0.5084, Error: 0.00
Minibatch: 55, Loss: 0.5000, Error: 0.00
Minibatch: 56, Loss: 0.4929, Error: 0.00
Minibatch: 57, Loss: 0.4845, Error: 0.00
Minibatch: 58, Loss: 0.4778, Error: 0.00
Minibatch: 59, Loss: 0.4691, Error: 0.00
Minibatch: 60, Loss: 0.4631, Error: 0.00
Minibatch: 61, Loss: 0.4543, Error: 0.00
Minibatch: 62, Loss: 0.4463, Error: 0.00
Minibatch: 63, Loss: 0.4412, Error: 0.00
Minibatch: 64, Loss: 0.4315, Error: 0.00
Minibatch: 65, Loss: 0.4243, Error: 0.00
Minibatch: 66, Loss: 0.4163, Error: 0.00
Minibatch: 67, Loss: 0.4093, Error: 0.00
Minibatch: 68, Loss: 0.4023, Error: 0.00
Minibatch: 69, Loss: 0.3946, Error: 0.00
Minibatch: 70, Loss: 0.3872, Error: 0.00
Minibatch: 71, Loss: 0.3800, Error: 0.00
Minibatch: 72, Loss: 0.3732, Error: 0.00
Minibatch: 73, Loss: 0.3670, Error: 0.00
Minibatch: 74, Loss: 0.3592, Error: 0.00
Minibatch: 75, Loss: 0.3527, Error: 0.00
Minibatch: 76, Loss: 0.3474, Error: 0.00
Minibatch: 77, Loss: 0.3406, Error: 0.00
Minibatch: 78, Loss: 0.3345, Error: 0.00
Minibatch: 79, Loss: 0.3276, Error: 0.00
Minibatch: 80, Loss: 0.3219, Error: 0.00
Minibatch: 81, Loss: 0.3169, Error: 0.00
Minibatch: 82, Loss: 0.3111, Error: 0.00
Minibatch: 83, Loss: 0.3043, Error: 0.00
Minibatch: 84, Loss: 0.3004, Error: 0.00
Minibatch: 85, Loss: 0.2936, Error: 0.00
Minibatch: 86, Loss: 0.2900, Error: 0.00
Minibatch: 87, Loss: 0.2839, Error: 0.00
Minibatch: 88, Loss: 0.2781, Error: 0.00
Minibatch: 89, Loss: 0.2740, Error: 0.00
Minibatch: 90, Loss: 0.2695, Error: 0.00
Minibatch: 91, Loss: 0.2653, Error: 0.00
Minibatch: 92, Loss: 0.2600, Error: 0.00
Minibatch: 93, Loss: 0.2559, Error: 0.00
Minibatch: 94, Loss: 0.2521, Error: 0.00
Minibatch: 95, Loss: 0.2467, Error: 0.00
Minibatch: 96, Loss: 0.2423, Error: 0.00
Minibatch: 97, Loss: 0.2388, Error: 0.00
Minibatch: 98, Loss: 0.2344, Error: 0.00
Minibatch: 99, Loss: 0.2299, Error: 0.00
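The loss falls steadily and the classification error reaches zero around minibatch 48. The trained function can also be queried directly; the sketch below is not part of the original notebook (X_test, true_classes, scores, and predicted_classes are illustrative names), and it scores a fresh batch via the function's eval method and reports the misclassification rate:

X_test = np.concatenate([ np.random.multivariate_normal([-3, -3], [[0.1, 0], [0, 0.1]], 100),
                          np.random.multivariate_normal([ 3,  3], [[0.1, 0], [0, 0.1]], 100),
                          np.random.multivariate_normal([-3,  3], [[0.1, 0], [0, 0.1]], 100),
                          np.random.multivariate_normal([ 3, -3], [[0.1, 0], [0, 0.1]], 100) ]).astype(np.float32)
true_classes = np.concatenate([[0] * 200, [1] * 200])
scores = model.eval({input : X_test})            # raw output-layer activations (the softmax lives in the loss)
predicted_classes = np.argmax(scores, axis = 1)  # softmax is monotonic, so argmax over the raw scores suffices
print("test error: {0:.2f}".format(np.mean(predicted_classes != true_classes)))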
In [6]:
# view the weights and biases
print(model.hidden_layer.W.value)
print(model.hidden_layer.b.value)
print(model.output_layer.W.value)
print(model.output_layer.b.value)
[[ 1.02382839  1.08454692]
 [ 1.02609062  1.0881623 ]]
[ 1.90145743 -2.3765676 ]
[[-2.58281779  0.79538554]
 [ 1.34072065 -2.63346553]]
[ 0.52333379 -0.52333283]
In [7]:
# visualize the hidden features (X and class_vector here come from the last training minibatch)
def sigmoid(x):
    return (1.0 / (1.0 + np.exp(-x)))
z1 = sigmoid(np.dot(X, model.hidden_layer.W.value[:,0]) + model.hidden_layer.b.value[0])
z2 = sigmoid(np.dot(X, model.hidden_layer.W.value[:,1]) + model.hidden_layer.b.value[1])
plt.scatter(z1, z2, c = class_vector, marker = 'x')
plt.show()
In [8]:
# examine the mapping from input features to hidden features: mean (z1, z2) for each of the four input clusters
print([np.mean(z1[0:100]), np.mean(z2[0:100])])
print([np.mean(z1[100:200]), np.mean(z2[100:200])])
print([np.mean(z1[200:300]), np.mean(z2[200:300])])
print([np.mean(z1[300:400]), np.mean(z2[300:400])])
[0.016060255, 0.00015912065]
[0.99966568, 0.98350954]
[0.86474818, 0.092182301]
[0.86586058, 0.09470854]
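The hidden layer has mapped the four input clusters onto three regions of the unit square: the class-0 clusters land near (0, 0) and (1, 1), while both class-1 clusters land near (0.87, 0.09), which the linear output layer can now separate. The sketch below (again illustrative, not part of the original notebook) completes the forward pass by hand using the printed output-layer weights:

# finish the forward pass by hand: hidden features -> output-layer scores -> predicted class
Z = np.stack([z1, z2], axis = 1)                                  # hidden features, shape (400, 2)
scores = np.dot(Z, model.output_layer.W.value) + model.output_layer.b.value
predicted_classes = np.argmax(scores, axis = 1)
print(np.mean(predicted_classes == class_vector.ravel()))         # fraction correct; should be near 1.0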