deep learning

import numpy as np
import pandas as pd
import tensorflow as tf
import urllib.request as request
import matplotlib.pyplot as plt

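# URLs of the Iris training and test CSVs hosted by TensorFlow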
IRIS_TRAIN_URL = "http://download.tensorflow.org/data/iris_training.csv"
IRIS_TEST_URL = "http://download.tensorflow.org/data/iris_test.csv"

# Load the CSVs; the first row of each file is metadata, so skip it
names = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'species']
train = pd.read_csv(IRIS_TRAIN_URL, names=names, skiprows=1)
test = pd.read_csv(IRIS_TEST_URL, names=names, skiprows=1)

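# Features: the four measurement columns, with the species label dropped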
Xtrain = train.drop("species", axis=1)
Xtest = test.drop("species", axis=1)

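# One-hot encode the species label (three classes)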
ytrain = pd.get_dummies(train.species)
ytest = pd.get_dummies(test.species)

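# Build and train a 4-hidden_nodes-3 network; returns the two learned weight matrices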
def create_train_model(hidden_nodes, num_iters):

    # Reset the default graph; placeholders are sized for the 120 training samples
    tf.reset_default_graph()
    X = tf.placeholder(shape=(120, 4), dtype=tf.float64, name='X')
    y = tf.placeholder(shape=(120, 3), dtype=tf.float64, name='y')

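    # Randomly initialized weight matrices for both layers (no bias terms)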
    W1 = tf.Variable(np.random.rand(4, hidden_nodes), dtype=tf.float64)
    W2 = tf.Variable(np.random.rand(hidden_nodes, 3), dtype=tf.float64)

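    # Forward pass: sigmoid activations on the hidden and output layers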
    A1 = tf.sigmoid(tf.matmul(X, W1))
    y_est = tf.sigmoid(tf.matmul(A1, W2))

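    # Sum-of-squared-errors loss between the network output and the one-hot targets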
    deltas = tf.square(y_est - y)
    loss = tf.reduce_sum(deltas)

    # Plain gradient descent on the full training batch
    optimizer = tf.train.GradientDescentOptimizer(0.005)
    train_op = optimizer.minimize(loss)

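    # Create a session, initialize the variables and train on the full batch each iteration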
    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)
    for i in range(num_iters):
        sess.run(train_op, feed_dict={X: Xtrain.values, y: ytrain.values})
        # Record the loss at every iteration so it can be plotted later
        loss_plot[hidden_nodes].append(
            sess.run(loss, feed_dict={X: Xtrain.values, y: ytrain.values}))
        weights1 = sess.run(W1)
        weights2 = sess.run(W2)
    print("loss(hidden nodes: %d, iterations: %d): %.2f"
          % (hidden_nodes, num_iters, loss_plot[hidden_nodes][-1]))
    return weights1, weights2

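# Three hidden-layer sizes to compare, plus containers for each run's loss history and weights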
num_hidden_nodes = [5, 10, 20]
loss_plot = {5: [], 10: [], 20: []}
weights1 = {5: None, 10: None, 20: None}
weights2 = {5: None, 10: None, 20: None}
num_iters = 2000

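# Train each network size and plot its loss curve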
plt.figure(figsize=(12,8))
for hidden_nodes in num_hidden_nodes:
    weights1[hidden_nodes], weights2[hidden_nodes] = create_train_model(
        hidden_nodes, num_iters)
    plt.plot(range(num_iters),
             loss_plot[hidden_nodes],
             label="nn: 4-%d-3" % hidden_nodes)

plt.xlabel('Iteration', fontsize=12)
plt.ylabel('Loss', fontsize=12)
plt.legend(fontsize=12)
plt.show()


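# Evaluation graph: placeholders sized for the 30 test samples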
X = tf.placeholder(shape=(30, 4), dtype=tf.float64, name='X')
y = tf.placeholder(shape=(30, 3), dtype=tf.float64, name='y')

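# Score each trained network on the held-out test set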
for hidden_nodes in num_hidden_nodes:
    # Rebuild the forward pass with the weights learned during training
    W1 = tf.Variable(weights1[hidden_nodes])
    W2 = tf.Variable(weights2[hidden_nodes])
    A1 = tf.sigmoid(tf.matmul(X, W1))
    y_est = tf.sigmoid(tf.matmul(A1, W2))

    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        y_est_np = sess.run(y_est, feed_dict={X: Xtest.values, y: ytest.values})

    # A prediction counts as correct when the largest output matches the one-hot target
    correct = [estimate.argmax(axis=0) == target.argmax(axis=0)
               for estimate, target in zip(y_est_np, ytest.values)]
    accuracy = 100 * sum(correct) / len(correct)
    print('Network architecture 4-%d-3, accuracy: %.2f%%'
          % (hidden_nodes, accuracy))
