Demo entry 6782418



Submitted by anonymous on Jan 14, 2019 at 05:10
Language: Python 3. Code size: 788 Bytes.

def variable_on_cpu(name, shape):
  """Create (or fetch) a variable whose storage is pinned to the CPU.

  Keeping parameters on CPU lets every GPU tower share one copy of the
  weights instead of each device holding its own replica.
  """
  with tf.device("/cpu:0"):
    return tf.get_variable(name, shape)

# Build one "tower" per GPU: each computes the loss and its gradients on
# its own device, while the (CPU-resident) variables are shared.
tower_grads = []
for i in range(num_gpus):  # fix: xrange is Python 2-only; page says Python 3
  with tf.device('/gpu:%d' % i):   # compute loss and gradients on each GPU
    layer1_w = variable_on_cpu("layer1_weight", [feature_dim, layer1_dim])
    layer1_o = tf.matmul(features, layer1_w)
    # NOTE(review): layerN_o and SGDoptimizer come from code not shown here
    # (presumably the remaining layers of the network) — confirm upstream.
    loss = tf.cross_entropy(layerN_o, labels)
    grads = SGDoptimizer.compute_gradients(loss)
    # Bug fix: the per-tower gradients were computed but never collected,
    # so average_gradients() below would have received an empty list.
    tower_grads.append(grads)

with tf.device('/cpu:0'):  # average and apply gradients on CPU
  grads = average_gradients(tower_grads)
  train_op = opt.apply_gradients(grads, global_step=global_step)

for step in xrange(FLAGS.max_steps):
  _, loss_value =[train_op, loss])

This snippet took 0.00 seconds to highlight.

Back to the Entry List or Home.

Delete this entry (admin only).