How to collect all variables as a list in TensorFlow, grouped by function












I am trying to reproduce the cGAN network architecture introduced in the recent paper Deep Video Portraits (2018, Stanford).

I have defined the generator as T(x), following the paper's notation.

T(x) is built from the operation blocks defined in the code below, such as conv_down(), conv_upsample(), biLinearDown() and finalTanH().

I have annotated their scopes with the 'with tf.variable_scope()' syntax.

While putting together the loss and the optimizers, I found that I need to collect all the generator-related variables, since we are going to train with two different optimizers: one for the discriminator and one for the generator.

The discriminator is up to my colleague, so it is not my concern here; I have just left it as a pseudo implementation.

However, I'd like to make a list of the variables defined in T(x) in my code.

How can I do this? Any help?



import tensorflow as tf
import numpy as np

# hyper-params
learning_rate = 0.0002
epochs = 250
batch_size = 16
N_w = 11                  # number of frames concatenated together
channels = 9 * N_w
drop_out = [0.5, 0.5, 0.5, 0, 0, 0, 0, 0]
lambda_ = 100             # weighting of the L1 term in the generator loss

tf.reset_default_graph()

with tf.Graph().as_default():

    def conv_down(x, N, count):  # Conv [4x4, str_2] > Batch_Normalization > Leaky_ReLU
        with tf.variable_scope("conv_down_{}_count{}".format(N, count)):  # N == depth of tensor
            x = tf.layers.conv2d(x, N, kernel_size=4, strides=2, padding='same', kernel_initializer=tf.truncated_normal_initializer(stddev=np.sqrt(0.2)))
            x = tf.contrib.layers.batch_norm(x)
            x = tf.nn.leaky_relu(x)  # for conv_down, use leaky ReLU
        return x

    def conv_upsample(x, N, drop_rate, count):
        with tf.variable_scope("conv_upsamp_{}_count{}".format(N, count)):
            # up
            with tf.variable_scope("conv_up_count{}".format(count)):
                x = tf.layers.conv2d_transpose(x, N, kernel_size=4, strides=2, padding='same', kernel_initializer=tf.truncated_normal_initializer(stddev=np.sqrt(0.2)))
                x = tf.contrib.layers.batch_norm(x)
            with tf.variable_scope("convdrop_{}".format(count)):
                if drop_rate != 0:
                    x = tf.nn.dropout(x, keep_prob=drop_rate)
                x = tf.nn.relu(x)

            # refine1
            with tf.variable_scope("refine1"):
                x = tf.layers.conv2d(x, N, kernel_size=3, strides=1, padding='same', kernel_initializer=tf.truncated_normal_initializer(stddev=np.sqrt(0.2)))
                x = tf.contrib.layers.batch_norm(x)
            with tf.variable_scope("rf1drop_out_{}".format(count)):
                if drop_rate != 0:
                    x = tf.nn.dropout(x, keep_prob=drop_rate)
                x = tf.nn.relu(x)

            # refine2
            with tf.variable_scope("refine2"):
                x = tf.layers.conv2d(x, N, kernel_size=3, strides=1, padding='same', kernel_initializer=tf.truncated_normal_initializer(stddev=np.sqrt(0.2)))
                x = tf.contrib.layers.batch_norm(x)
            with tf.variable_scope("rf2drop_out{}".format(count)):
                if drop_rate != 0:
                    x = tf.nn.dropout(x, keep_prob=drop_rate)
                x = tf.nn.relu(x)

        return x

    def biLinearDown(x, N):
        return tf.image.resize_images(x, [N, N])

    def finalTanH(x):
        with tf.variable_scope("tanh"):
            x = tf.nn.tanh(x)
        return x

    def T(x):
        # channel_output_structure
        down_channel_output = [64, 128, 256, 512, 512, 512, 512, 512]
        up_channel_output = [512, 512, 512, 512, 256, 128, 64, 3]
        biLinearDown_output = [32, 64, 128]  # for skip-connections

        # down_sampling
        conv1 = conv_down(x, down_channel_output[0], 1)
        conv2 = conv_down(conv1, down_channel_output[1], 2)
        conv3 = conv_down(conv2, down_channel_output[2], 3)
        conv4 = conv_down(conv3, down_channel_output[3], 4)
        conv5 = conv_down(conv4, down_channel_output[4], 5)
        conv6 = conv_down(conv5, down_channel_output[5], 6)
        conv7 = conv_down(conv6, down_channel_output[6], 7)
        conv8 = conv_down(conv7, down_channel_output[7], 8)

        # upsampling
        dconv1 = conv_upsample(conv8, up_channel_output[0], drop_out[0], 1)
        dconv2 = conv_upsample(dconv1, up_channel_output[1], drop_out[1], 2)
        dconv3 = conv_upsample(dconv2, up_channel_output[2], drop_out[2], 3)
        dconv4 = conv_upsample(dconv3, up_channel_output[3], drop_out[3], 4)
        dconv5 = conv_upsample(dconv4, up_channel_output[4], drop_out[4], 5)
        dconv6 = conv_upsample(tf.concat([dconv5, biLinearDown(x, biLinearDown_output[0])], axis=3), up_channel_output[5], drop_out[5], 6)
        dconv7 = conv_upsample(tf.concat([dconv6, biLinearDown(x, biLinearDown_output[1])], axis=3), up_channel_output[6], drop_out[6], 7)
        dconv8 = conv_upsample(tf.concat([dconv7, biLinearDown(x, biLinearDown_output[2])], axis=3), up_channel_output[7], drop_out[7], 8)

        # final_tanh
        T_x = finalTanH(dconv8)

        return T_x

    # input tensor x : to feed as Fake
    x = tf.placeholder(tf.float32, [batch_size, 256, 256, channels])  # batch_size x Height x Width x channels

    # generated tensor T(x)
    T_x = T(x)

    # ground-truth tensor Y : to feed as Real
    Y = tf.placeholder(tf.float32, [batch_size, 256, 256, 3])  # just a captured video frame

    # define pseudo discriminator (my colleague will implement the real one)
    def D(to_be_discriminated):  # input is either T(x) or the ground truth, with shape [256 x 256 x 3]
        pseudo_prob = np.float32(np.random.uniform(low=0., high=1.))
        return pseudo_prob

    theta_D = []   # placeholder: the tf.Variables of the discriminator (my colleague's part)
    # theta_T = ???  <- this is what I want: a list of all variables defined in T(x)

    # discriminated results
    D_real = D(Y)
    D_fake = D(T_x)

    # define loss
    E_cGAN = tf.reduce_mean(tf.log(D_real) + tf.log(1. - D_fake))
    E_l1 = tf.reduce_mean(tf.norm(Y - T_x))
    Loss = E_cGAN + lambda_ * E_l1

    # optimizers
    D_solver = tf.train.AdamOptimizer().minimize(-Loss, var_list=theta_D)  # only update D's parameters, so var_list = theta_D
    T_solver = tf.train.AdamOptimizer().minimize(Loss, var_list=theta_T)   # only update T's parameters, so var_list = theta_T

    #### TEST ####
    # define pseudo inputs for testing
    pseudo_x = np.float32(np.random.uniform(low=-1., high=1., size=[16, 256, 256, 99]))
    pseudo_Y = np.float32(np.random.uniform(low=-1., high=1., size=[16, 256, 256, 3]))

    #### RUN ####
    init_g = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init_g)
        sess.run(T_x, feed_dict={x: pseudo_x, Y: pseudo_Y})





























      machine-learning neural-network deep-learning tensorflow gan






asked Jul 21 '18 at 8:05 by Beverlie · edited Jul 21 '18 at 8:17 by Vaalizaadeh





          1 Answer












In GANs you typically have to train one set of parameters while the other set is frozen, and this alternation can happen many times (see the training-loop sketch at the end of this answer). You can do the following sequence of operations.



Define all the generator-related variables inside their corresponding variable scope, and afterwards access them using tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='my_scope'). During training you can then pass these variables as the trainable parameters of your optimiser by setting the var_list argument of the minimize method. You can also take a look here.
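
Applied to your generator, a minimal sketch could look like the following (the enclosing scope name "generator" is my own choice for illustration, and Loss / learning_rate are the objects from your code):

    # build the whole generator under one enclosing variable scope
    with tf.variable_scope("generator"):
        T_x = T(x)

    # collect every variable created under that scope into a list
    theta_T = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="generator")
    # (tf.GraphKeys.TRAINABLE_VARIABLES would restrict the list to trainable parameters only)

    # only the generator's parameters are updated by this optimizer
    T_solver = tf.train.AdamOptimizer(learning_rate).minimize(Loss, var_list=theta_T)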



If you want all the trainable variables, you can get them as a list using the tf.trainable_variables() method.
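
A sketch of that alternative, filtering the global list by name prefix (the "generator/" and "discriminator/" prefixes are again just assumed scope names, not something your current code defines):

    theta_T = [v for v in tf.trainable_variables() if v.name.startswith("generator/")]
    theta_D = [v for v in tf.trainable_variables() if v.name.startswith("discriminator/")]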



It may also be worth looking here at other aspects of freezing variables.



You can also take a look at Hvass-Labs's implementation of adversarial networks. Take a look here too.
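
Putting it together, the freeze/train alternation from the first paragraph reduces to two run calls per step. A rough sketch (D_solver and T_solver are the optimizer ops from your code, each built with its own var_list; next_batch() is a hypothetical data-loading helper, not part of your code):

    for epoch in range(epochs):
        batch_x, batch_Y = next_batch(batch_size)  # hypothetical helper that yields training pairs
        # discriminator step: only theta_D is updated, the generator's variables stay frozen
        sess.run(D_solver, feed_dict={x: batch_x, Y: batch_Y})
        # generator step: only theta_T is updated, the discriminator's variables stay frozen
        sess.run(T_solver, feed_dict={x: batch_x, Y: batch_Y})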






answered Jul 21 '18 at 8:16 by Vaalizaadeh · edited Jul 21 '18 at 10:07




