tf.scan, tf.less, tf.assign, projector, tf.dtypes

2020. 1. 5. 23:20 · Python/Tensorflow


## tensorflow InteractiveSession, get_shape, expand_dims, linspace

import tensorflow as tf

sess = tf.InteractiveSession()
c = tf.linspace(0.0, 4.0, 5)     # 5 evenly spaced values from 0.0 to 4.0
print(c.get_shape())             # (5,)
d = tf.expand_dims(c, 1)         # add an axis -> column vector
print(d.get_shape())             # (5, 1)
print(c.eval())                  # with an InteractiveSession, eval() needs no explicit session
sess.close()
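As a quick follow-up (a minimal sketch of my own, same TF1 API as above): expand_dims is what lets the 1-D vector take part in matrix multiplication, since tf.matmul needs at least 2-D inputs.

sess = tf.InteractiveSession()
c = tf.linspace(0.0, 4.0, 5)        # shape (5,)
col = tf.expand_dims(c, 1)          # shape (5, 1)
row = tf.expand_dims(c, 0)          # shape (1, 5)
outer = tf.matmul(col, row)         # (5, 1) x (1, 5) -> (5, 5) outer product
print(outer.get_shape())
print(outer.eval())
sess.close()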

tf.Graph, dtype

with tf.Graph().as_default():
    c1 = tf.constant(4, dtype=tf.float32, name="float")
    c2 = tf.constant(4, dtype=tf.float64, name="float")
print(c1.name)
print(c2.name)
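Both constants ask for the name "float", so the second one is de-duplicated and the prints show float:0 and float_1:0. Since the post's title also mentions tf.dtypes, here is a small sketch of my own showing how to change a tensor's dtype with tf.cast / tf.dtypes.cast:

with tf.Graph().as_default():
    c1 = tf.constant(4, dtype=tf.float32, name="float")
    c1_as_f64 = tf.cast(c1, tf.float64)        # same value, new dtype
    c1_as_int = tf.dtypes.cast(c1, tf.int32)   # tf.dtypes.cast is the same op under the tf.dtypes namespace
print(c1_as_f64.dtype)   # <dtype: 'float64'>
print(c1_as_int.dtype)   # <dtype: 'int32'>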

name_scope

with tf.Graph().as_default():
    c1 = tf.constant(4, dtype=tf.float32, name="c")
    with tf.name_scope("prefix_name"):
        c2 = tf.constant(4, dtype=tf.int32, name="c")
        c3 = tf.constant(4, dtype=tf.float32, name="c")
print(c1.name)
print(c2.name)
print(c3.name)
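The expected output is c:0, prefix_name/c:0 and prefix_name/c_1:0: the scope becomes a prefix, and the duplicate name inside the scope gets a _1 suffix. Name scopes also nest, which the regression example below relies on; a minimal sketch of my own (names chosen only for illustration):

with tf.Graph().as_default():
    with tf.name_scope("outer"):
        with tf.name_scope("inner"):
            v = tf.constant(1.0, name="c")
print(v.name)   # outer/inner/c:0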

import numpy as np

NUM_STEPS = 10
g = tf.Graph()
wb_ = []

# synthetic data: y = w_real . x + b_real + noise
x_data = np.random.randn(2000, 3)
w_real = [0.3, 0.5, 0.1]
b_real = -0.2
noise = np.random.randn(1, 2000) * 0.1
y_data = np.matmul(w_real, x_data.T) + b_real + noise

with g.as_default():
    x = tf.placeholder(tf.float32, shape=[None, 3])
    y_true = tf.placeholder(tf.float32, shape=None)
    with tf.name_scope("inference") as scope:
        with tf.name_scope("W"):
            w = tf.Variable([[0, 0, 0]], dtype=tf.float32, name="weights")
        with tf.name_scope("BIAS"):
            b = tf.Variable(0, dtype=tf.float32, name="bias")
        y_pred = tf.matmul(w, tf.transpose(x)) + b
    with tf.name_scope("loss") as scope:
        loss = tf.reduce_mean(tf.square(y_true - y_pred))

    with tf.name_scope("train") as scope:
        lr = 1e-5
        optimizer = tf.train.GradientDescentOptimizer(lr)
        train = optimizer.minimize(loss)

    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        for step in range(NUM_STEPS):
            sess.run(train, feed_dict={x: x_data, y_true: y_data})
            print(sess.run([w, b]))
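Because everything above is wrapped in name scopes, the graph is already organized for TensorBoard. A minimal sketch, not from the original post (the ./logs directory and the summary name are my own choices), of logging the loss and the graph so the nodes show up grouped by scope:

with g.as_default():
    tf.summary.scalar("loss", loss)        # scalar summary on the loss node
    merged = tf.summary.merge_all()
    with tf.Session() as sess:
        writer = tf.summary.FileWriter("./logs", graph=g)   # graph view groups nodes by name_scope
        sess.run(tf.global_variables_initializer())
        for step in range(NUM_STEPS):
            _, summary = sess.run([train, merged],
                                  feed_dict={x: x_data, y_true: y_data})
            writer.add_summary(summary, step)
        writer.close()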

tf.scan

## tf.scan: accumulate along the first axis, keeping every intermediate result
elems = np.array(list("Tensorflow Flow"))
print(elems)

scan_sum = tf.scan(lambda a, x: a + x, elems)   # running concatenation of the characters
sess = tf.InteractiveSession()
print(sess.run(scan_sum))
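The same pattern on numbers gives a running (cumulative) sum; a small sketch of my own, reusing the InteractiveSession opened above, to make the accumulator behavior explicit:

elems_num = np.array([1, 2, 3, 4, 5])
cumsum = tf.scan(lambda a, x: a + x, elems_num)   # a is the running total, x the current element
print(sess.run(cumsum))                           # [ 1  3  6 10 15]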

 

tf.slice, tf.less, tf.dtypes.cast

probs = tf.random_uniform(shape=[10, 2])
a = tf.slice(probs, [0, 1], [-1, 1])       # keep every row, only column 1
print(a.eval())
d = tf.less(a, 0.1)                        # element-wise a < 0.1 -> boolean tensor
print(d.eval())
print(tf.dtypes.cast(d, tf.int32).eval())  # booleans to 0/1
# note: each eval() re-runs random_uniform, so the three calls see different random values
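One gotcha above is exactly that re-sampling. A small sketch of my own (still reusing the open InteractiveSession) that evaluates everything in a single run, so the slice, the mask, and the count of values below 0.1 are consistent:

probs = tf.random_uniform(shape=[10, 2])
col = tf.slice(probs, [0, 1], [-1, 1])
mask = tf.less(col, 0.1)
count = tf.reduce_sum(tf.dtypes.cast(mask, tf.int32))
col_val, mask_val, count_val = sess.run([col, mask, count])   # single run: consistent values
print(col_val.ravel())
print(mask_val.ravel())
print(count_val)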

assign

tf.reset_default_graph()
assign_value = tf.placeholder(tf.float32, shape=[5])
w = tf.Variable(tf.random_uniform(shape=[5]), trainable=True)
w_assign = w.assign(assign_value)          # op that overwrites w with the placeholder's value
value = np.array([0.0, 1.0, 2.0, 4.0, 2.0])
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    print(sess.run(w))                                          # random initial value
    print(sess.run(w_assign, feed_dict={assign_value: value}))  # value after the assign runs
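For contrast, a small sketch of my own showing that assign changes the variable in place, so later reads of it see the new value, and that tf.assign_add accumulates:

tf.reset_default_graph()
v = tf.Variable(tf.zeros([3]))
set_v = tf.assign(v, [1.0, 2.0, 3.0])      # overwrite v
inc_v = tf.assign_add(v, [1.0, 1.0, 1.0])  # add to v in place
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(set_v)
    print(sess.run(v))    # [1. 2. 3.]
    sess.run(inc_v)
    print(sess.run(v))    # [2. 3. 4.]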

tensorboard projector

import os
from tensorflow.contrib.tensorboard.plugins import projector

# `embeddings` (the embedding matrix variable) and `index2word_map` are assumed to be
# defined by the word2vec / skip-gram model this snippet was taken from
merged = tf.summary.merge_all()
LOG_DIR = "./"
with tf.Session() as sess:
    train_writer = tf.summary.FileWriter(LOG_DIR,
                                         graph=tf.get_default_graph())
    saver = tf.train.Saver()

    # write the label metadata the projector tab shows next to each embedded point
    with open(os.path.join(LOG_DIR, "metadata.tsv"), "w") as metadata:
        metadata.write("Name\tClass\n")
        for k, v in index2word_map.items():
            metadata.write("%s\t%d\n" % (v, k))

    # link the embedding variable to the metadata file
    config = projector.ProjectorConfig()
    embedding = config.embeddings.add()
    embedding.tensor_name = embeddings.name
    embedding.metadata_path = os.path.join(LOG_DIR, "metadata.tsv")
    projector.visualize_embeddings(train_writer, config)

    tf.global_variables_initializer().run()
    for step in range(100):
        # x_batch, y_batch = get_skipgram_batch(batch_size)
        pass  # training loop body elided in the original post
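For the PROJECTOR tab to show the embedding values, the variables also need to be saved as a checkpoint inside LOG_DIR, and TensorBoard is then pointed at the same directory. A sketch of those last two steps (the checkpoint file name is my own choice):

saver.save(sess, os.path.join(LOG_DIR, "model.ckpt"))   # inside the session, after the training loop
# then, from a shell: tensorboard --logdir=.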