Collection of Useful TensorFlow Implementations (not yet tested)

import math

import numpy as np
import tensorflow as tf
from tflearn.layers.conv import global_avg_pool  # assumed: Global_Average_Pooling below uses tflearn's helper

# he_init is referenced throughout but never defined in the original; a He-style
# variance-scaling initializer is assumed here.
he_init = tf.variance_scaling_initializer(scale=2.0, mode='fan_in')


def weight_norm(x, output_dim):
    # weight normalization (Salimans & Kingma): w = g * v / ||v||
    input_dim = int(x.get_shape()[-1])
    g = tf.get_variable('g_scalar', shape=[output_dim], dtype=tf.float32, initializer=tf.ones_initializer())
    w = tf.get_variable('weight', shape=[input_dim, output_dim], dtype=tf.float32, initializer=he_init)
    w_n = tf.nn.l2_normalize(w, dim=0) * g  # normalize each column (dim=0), then rescale by g

    # the original returned tf.variables_initializer(w_n), but that function expects
    # a list of variables; return the normalized weight tensor itself instead
    return w_n
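
A quick way to check this in isolation (a sketch assuming TF 1.x graph mode; the scope name is arbitrary): every column of the returned weight should have L2 norm equal to g, which starts at 1.

x_in = tf.placeholder(tf.float32, [None, 8])
with tf.variable_scope('wn_check'):
    w_n = weight_norm(x_in, output_dim=4)
col_norms = tf.sqrt(tf.reduce_sum(tf.square(w_n), axis=0))  # should equal g, i.e. all ones
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(col_norms))  # ~[1. 1. 1. 1.]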
weight_init = tf.random_normal_initializer(mean=0.0, stddev=0.02)
weight_regularizer = None


def conv_layer(x, filter_size, kernel, stride=1, padding='SAME', wn=False, sn=True, layer_name="conv"):
    with tf.name_scope(layer_name):
        '''if wn:
            w_init = weight_norm(x, filter_size)

            x = tf.layers.conv2d(inputs=x, filters=filter_size, kernel_size=kernel, kernel_initializer=w_init, strides=stride, padding=padding)
        else :
            x = tf.layers.conv2d(inputs=x, filters=filter_size, kernel_size=kernel, kernel_initializer=he_init, strides=stride, padding=padding)
        '''
        if sn:
            with tf.variable_scope('scope', reuse=tf.AUTO_REUSE):
                # kernel layout for tf.nn.conv2d: [h, w, in_channels, out_channels]
                w = tf.get_variable("kernel" + layer_name,
                                    shape=[kernel[0], kernel[1], x.get_shape()[-1], filter_size],
                                    initializer=weight_init, regularizer=weight_regularizer)
                # bias = tf.get_variable("bias", [filter_size], initializer=tf.constant_initializer(0.0))
                x = tf.nn.conv2d(input=x, filter=spectral_norm(layer_name, w),
                                 strides=[1, stride, stride, 1], padding=padding)

        else:
            x = tf.layers.conv2d(inputs=x, filters=filter_size,
                                 kernel_size=kernel, kernel_initializer=weight_init,
                                 kernel_regularizer=weight_regularizer,
                                 strides=stride, padding=padding)

        return x
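
For example, a spectrally normalized 3x3 downsampling convolution could be wired up like this (hypothetical input shape; note kernel must be indexable, e.g. a list [h, w], because the sn branch reads kernel[0] and kernel[1]):

images = tf.placeholder(tf.float32, [None, 32, 32, 3])
h = conv_layer(images, filter_size=64, kernel=[3, 3], stride=2, sn=True, layer_name='conv1')
h = lrelu(h)  # -> [None, 16, 16, 64]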


def deconv_layer(x, filter_size, kernel, stride=1, padding='SAME', wn=False, sn=False, layer_name='deconv'):
    # the original ran one conv2d_transpose for the wn/he path and then a second one
    # for the sn path, convolving twice; restructured into a single if/elif/else
    with tf.name_scope(layer_name):
        if wn:
            # sketch only: weight_norm returns a tensor, not an initializer, so a true
            # weight-norm deconv would need a custom kernel as in linear() below
            x = tf.layers.conv2d_transpose(inputs=x, filters=filter_size, kernel_size=kernel,
                                           kernel_initializer=he_init, strides=stride, padding=padding)
        elif sn:
            x_shape = x.get_shape().as_list()
            # requires a static batch size, since output_shape must be fully known here
            output_shape = [x_shape[0], x_shape[1] * stride, x_shape[2] * stride, filter_size]
            # transposed-conv kernel layout: [h, w, out_channels, in_channels]
            w = tf.get_variable("kernel" + layer_name, shape=[kernel[0], kernel[1], filter_size, x_shape[-1]],
                                initializer=weight_init, regularizer=weight_regularizer)
            # the original called an undefined spectral_normed_weight(); use spectral_norm() defined below
            x = tf.nn.conv2d_transpose(x, filter=spectral_norm(layer_name, w), output_shape=output_shape,
                                       strides=[1, stride, stride, 1], padding=padding)
        else:
            x = tf.layers.conv2d_transpose(inputs=x, filters=filter_size,
                                           kernel_size=kernel, kernel_initializer=weight_init,
                                           kernel_regularizer=weight_regularizer,
                                           strides=stride, padding=padding)

        return x


def linear(x, unit, wn=False, layer_name='linear'):
    with tf.name_scope(layer_name):
        if wn:
            # weight_norm returns the normalized weight tensor, so multiply it in directly
            # rather than passing it as kernel_initializer (which must be a callable);
            # relies on the caller providing a unique variable scope
            x = tf.matmul(x, weight_norm(x, unit))
        else:
            x = tf.layers.dense(inputs=x, units=unit, kernel_initializer=he_init)
        return x


def nin(x, unit, wn=False, layer_name='nin'):
    # network-in-network: the same dense layer applied at every spatial position
    # https://github.com/openai/weightnorm/blob/master/tensorflow/nn.py
    with tf.name_scope(layer_name):
        s = list(map(int, x.get_shape()))  # requires a fully static input shape
        x = tf.reshape(x, [int(np.prod(s[:-1])), s[-1]])
        x = linear(x, unit, wn, layer_name)
        x = tf.reshape(x, s[:-1] + [unit])
        return x
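
In effect this is a 1x1 convolution; a hypothetical shape walk-through:

feat = tf.placeholder(tf.float32, [16, 8, 8, 64])
out = nin(feat, unit=32, layer_name='nin1')  # reshaped to [1024, 64], dense, back to [16, 8, 8, 32]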


def gaussian_noise_layer(x, std=0.15):
    noise = tf.random_normal(shape=tf.shape(x), mean=0.0, stddev=std, dtype=tf.float32)
    return x + noise

def Global_Average_Pooling(x):
    return global_avg_pool(x, name='Global_avg_pooling')
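
global_avg_pool here comes from tflearn; if you would rather avoid that dependency, a plain-TF equivalent (assumed drop-in, minus the output name) is just a mean over the spatial axes:

def global_average_pooling(x):
    # average over H and W, producing [N, C]
    return tf.reduce_mean(x, axis=[1, 2])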


def max_pooling(x, kernel, stride):
    return tf.layers.max_pooling2d(x, pool_size=kernel, strides=stride, padding='VALID')


def flatten(x):
    return tf.contrib.layers.flatten(x)


def lrelu(x, leak=0.2, name="lrelu"):
    return tf.maximum(x, leak * x)


def sigmoid(x):
    return tf.nn.sigmoid(x)


def relu(x):
    return tf.nn.relu(x)


def tanh(x):
    return tf.nn.tanh(x)

def conv_concat(x, y):
    x_shapes = x.get_shape()
    y_shapes = y.get_shape()

    return concat([x, y * tf.ones([x_shapes[0], x_shapes[1], x_shapes[2], y_shapes[3]])], axis=3)
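
conv_concat broadcasts a conditioning tensor y (e.g. one-hot labels reshaped to [N, 1, 1, num_classes]) across the spatial grid of x and concatenates along channels, the usual label-conditioning trick in conditional GANs. A hypothetical shape walk-through (note it reads x_shapes[0], so the batch size must be static):

x = tf.placeholder(tf.float32, [16, 8, 8, 64])
y = tf.placeholder(tf.float32, [16, 1, 1, 10])  # one-hot labels reshaped to NHWC
xy = conv_concat(x, y)                          # -> [16, 8, 8, 74]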


def concat(x, axis=1):
    return tf.concat(x, axis=axis)


def reshape(x, shape):
    return tf.reshape(x, shape=shape)


def batch_norm(x, is_training, scope):
    return tf.contrib.layers.batch_norm(x,
                                        decay=0.9,
                                        updates_collections=None,
                                        epsilon=1e-5,
                                        scale=True,
                                        is_training=is_training,
                                        scope=scope)

def instance_norm(x, is_training, scope):
    # is_training is unused: instance norm uses per-sample statistics at both train and test time
    with tf.variable_scope(scope):
        epsilon = 1e-5
        mean, var = tf.nn.moments(x, [1, 2], keep_dims=True)  # per-sample, per-channel spatial stats
        scale = tf.get_variable('scale', [x.get_shape()[-1]],
                                initializer=tf.truncated_normal_initializer(mean=1.0, stddev=0.02))
        offset = tf.get_variable('offset', [x.get_shape()[-1]], initializer=tf.constant_initializer(0.0))
        out = scale * tf.div(x - mean, tf.sqrt(var + epsilon)) + offset

        return out

def dropout(x, rate, is_training):
    return tf.layers.dropout(inputs=x, rate=rate, training=is_training)

def rampup(epoch):
    # Gaussian ramp-up over the first 80 epochs, as in Temporal Ensembling (Laine & Aila)
    if epoch < 80:
        p = max(0.0, float(epoch)) / float(80)
        p = 1.0 - p
        return math.exp(-p * p * 5.0)
    else:
        return 1.0

def rampdown(epoch):
    # Gaussian ramp-down over the last 50 epochs of a 300-epoch run
    if epoch >= (300 - 50):
        ep = (epoch - (300 - 50)) * 0.5
        return math.exp(-(ep * ep) / 50)
    else:
        return 1.0
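
Plugging in a few epochs shows the shape of the two schedules (plain arithmetic, no TF needed):

for e in [0, 40, 80, 250, 275, 300]:
    print(e, round(rampup(e), 4), round(rampdown(e), 4))
# 0 -> 0.0067, 1.0 / 40 -> 0.2865, 1.0 / 80 -> 1.0, 1.0
# 250 -> 1.0, 1.0 / 275 -> 1.0, 0.0439 / 300 -> 1.0, ~0.0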

def _l2normalize(v, eps=1e-12):
    return v / tf.sqrt(tf.reduce_sum(tf.square(v)) + eps)

def max_singular_value(W, u, Ip=1):
    # approximate the largest singular value of W with Ip steps of power iteration
    _u = u
    _v = 0
    for _ in range(Ip):
        _v = _l2normalize(tf.matmul(_u, W), eps=1e-12)
        _u = _l2normalize(tf.matmul(_v, W, transpose_b=True), eps=1e-12)
    _v = tf.stop_gradient(_v)
    _u = tf.stop_gradient(_u)
    sigma = tf.reduce_sum(tf.matmul(_u, W) * _v)  # u^T W v, ~ sigma_max
    return sigma, _u, _v

def spectral_norm(name, W, Ip=1):
    # divide W by its (approximate) largest singular value so its spectral norm is ~1
    u = tf.get_variable(name + "_u", [1, W.shape[-1]], initializer=tf.random_normal_initializer(),
                        trainable=False)  # persistent power-iteration vector, 1 x out_channels
    W_mat = tf.transpose(tf.reshape(W, [-1, W.shape[-1]]))
    sigma, _u, _ = max_singular_value(W_mat, u, Ip)
    with tf.control_dependencies([tf.assign(u, _u)]):  # carry u over to the next step
        W_sn = W / sigma
    return W_sn
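
As a rough check (a sketch with a hypothetical kernel; a few extra power-iteration steps for accuracy), the largest singular value of the reshaped normalized kernel should come out near 1:

w = tf.get_variable('w_test', shape=[3, 3, 16, 32], initializer=weight_init)
w_sn = spectral_norm('w_test', w, Ip=5)
sigma_check = tf.svd(tf.reshape(w_sn, [-1, 32]), compute_uv=False)[0]  # largest singular value
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(sigma_check))  # ~1.0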

Reference: "Problems integrating spectral normalization into Triple-GAN" (in Chinese)
https://www.twblogs.net/a/5cc0894abd9eee397113d983

The author tried three reference implementations (Residual-Dense-Network-Trained-with-cGAN-for-Super-Resolution-master, Spectral_Normalization-Tensorflow-master, SNGAN-master), kept running into problems along the way, and ended up using the approach from the first one, applied in TripleGAN-Tensorflow-SN (the modifications start in ops.py).
