How to perform one-dimensional convolution in Tensorflow

Asked 1 year ago, Updated 1 year ago, 41 views

I am trying to reproduce the following structure in TensorFlow.
The tutorial only covers two-dimensional convolution, and I don't know how to correctly write a one-dimensional convolution.
Could you tell me the correct way to implement it?

(Image: diagram of the network structure to reproduce)

import tensorflow as tf

# Initialize weights from a truncated normal distribution with standard deviation 0.1
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

# Initialize biases to the constant 0.1
def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

# Convolution helper (stride 1, zero padding)
def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

# 2x pooling layer (pools along the time axis only)
def max_pool_2x128(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 1, 1], strides=[1, 2, 1, 1], padding='VALID')

# 4x pooling layer (pools along the time axis only)
def max_pool_4x128(x):
    return tf.nn.max_pool(x, ksize=[1, 4, 1, 1], strides=[1, 4, 1, 1], padding='VALID')

# First convolution layer
with tf.name_scope('conv1') as scope:
    # Filter shape: [filter_height, filter_width, in_channels, out_channels]
    W_conv1 = weight_variable([4, 1, 128, 256])
    b_conv1 = bias_variable([256])

    # Treat each sample as a 599x1 "image" with 128 channels
    x_image = tf.reshape(images_placeholder, [-1, 599, 1, 128])

    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)

# First pooling layer (4x)
with tf.name_scope('pool1') as scope:
    h_pool1 = max_pool_4x128(h_conv1)

# Second convolution layer
with tf.name_scope('conv2') as scope:
    W_conv2 = weight_variable([4, 1, 256, 256])
    b_conv2 = bias_variable([256])
    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)

# Second pooling layer (2x)
with tf.name_scope('pool2') as scope:
    h_pool2 = max_pool_2x128(h_conv2)

# Flatten and first fully connected layer
with tf.name_scope('fc1') as scope:
    W_fc1 = weight_variable([73 * 1 * 256, 1024])
    b_fc1 = bias_variable([1024])
    h_pool2_flat = tf.reshape(h_pool2, [-1, 73 * 1 * 256])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
    # Dropout layer
    keep_prob = tf.placeholder("float")
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

# Second fully connected layer
with tf.name_scope('fc2') as scope:
    W_fc2 = weight_variable([1024, NUM_CLASSES])
    b_fc2 = bias_variable([NUM_CLASSES])

# Output layer
with tf.name_scope('softmax') as scope:
    y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)

return y_conv

python tensorflow

2022-09-30 15:51

2 Answers

W_conv1 = weight_variable([4, 1, 128, 256])

However, the filter here is specified as [filter_height, filter_width, in_channels, out_channels].

So the filter would be 4x128 px, and since the input looks like a color image, in_channels would be 3:

W_conv1 = weight_variable([4, 128, 3, 32])

I think it would look something like this.

Other parts also seem to have incorrect size specifications.
For details, please refer to the TensorFlow documentation:
https://www.tensorflow.org/versions/r0.7/api_docs/python/nn.html#conv2d
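
As a minimal sketch of how those four filter dimensions line up (using the TF 1.x-era API from the question; the input placeholder and shapes here are illustrative assumptions):

import tensorflow as tf

# Input: a batch of 4x128 color images -> [batch, height, width, channels]
x = tf.placeholder(tf.float32, [None, 4, 128, 3])

# Filter: [filter_height, filter_width, in_channels, out_channels]
W = tf.Variable(tf.truncated_normal([4, 128, 3, 32], stddev=0.1))

# SAME padding with stride 1 preserves the spatial size
y = tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
print(y.get_shape())  # (?, 4, 128, 32)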


2022-09-30 15:51

There is a one-dimensional version called tf.nn.conv1d.
Internally it just calls tf.nn.conv2d, so I don't think using conv2d yourself is a problem either.
Rewriting your helper with tf.nn.conv1d:

def conv2d(x, W):
    return tf.nn.conv1d(x, W, 1, 'SAME')  # stride 1, zero padding

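Note that tf.nn.conv1d operates on 3-D tensors, so the width-1 reshape from the question is no longer needed. A minimal sketch under the same TF 1.x-era API (the placeholder and filter shapes are illustrative, matching the question's 599-step, 128-channel input):

import tensorflow as tf

# Input: [batch, width, channels] -- no dummy width-1 dimension required
x = tf.placeholder(tf.float32, [None, 599, 128])

# Filter: [filter_width, in_channels, out_channels]
W = tf.Variable(tf.truncated_normal([4, 128, 256], stddev=0.1))

# Stride 1 with zero padding keeps the output width at 599
h = tf.nn.conv1d(x, W, 1, 'SAME')
print(h.get_shape())  # (?, 599, 256)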


2022-09-30 15:51


