草庐IT

python - Tensorflow 相同的训练精度持续

coder 2023-08-22 原文

我被困在 Tensorflow 上的 CNN 模型上。 我的代码如下。

图书馆

# -*- coding: utf-8 -*-
import tensorflow as tf
import time
import json
import numpy as np
import matplotlib.pyplot as plt
import random
import multiprocessing as mp
import glob
import os

型号

def inference(images_placeholder, keep_prob):

    def weight_variable(shape):
        initial = tf.truncated_normal(shape, stddev=0.1)
        return tf.Variable(initial)

    def bias_variable(shape):
        initial = tf.constant(0.1, shape=shape)
        return tf.Variable(initial)

    # convolution
    def conv2d(x, W):
        return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

    # X2 pooling
    def max_pool_2x128(x):
        return tf.nn.max_pool(x, ksize=[1, 2, 1, 1],strides=[1, 2, 1, 1], padding='VALID')
    # X4 pooling
    def max_pool_4x128(x):
        return tf.nn.max_pool(x, ksize=[1, 4, 1, 1],strides=[1, 4, 1, 1], padding='VALID')

    x_image = tf.reshape(images_placeholder, [-1,599,1,128])

    #1st conv
    with tf.name_scope('conv1') as scope:
        W_conv1 = weight_variable([4, 1, 128, 256])
        b_conv1 = bias_variable([256])

        print "image変形後のshape"
        print tf.Tensor.get_shape(x_image)
        print "conv1の形"
        print tf.Tensor.get_shape(conv2d(x_image, W_conv1))

        h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)

    #1st pooling X4
    with tf.name_scope('pool1') as scope:
        h_pool1 = max_pool_4x128(h_conv1)
        print "h_pool1の形"
        print tf.Tensor.get_shape(h_pool1)

    #2nd conv
    with tf.name_scope('conv2') as scope:
        W_conv2 = weight_variable([4, 1, 256, 256])
        b_conv2 = bias_variable([256])
        h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)

    #2nd pooling X2
    with tf.name_scope('pool2') as scope:
        h_pool2 = max_pool_2x128(h_conv2)
        print "h_pool2の形"
        print tf.Tensor.get_shape(h_pool2)

    #3rd conv
    with tf.name_scope('conv3') as scope:
        W_conv3 = weight_variable([4, 1, 256, 512])
        b_conv3 = bias_variable([512])
        h_conv3 = tf.nn.relu(conv2d(h_pool2, W_conv3) + b_conv3)

    #3rd pooling X2
    with tf.name_scope('pool3') as scope:
        h_pool3 = max_pool_2x128(h_conv3)
        print "h_pool3の形"
        print tf.Tensor.get_shape(h_pool3)

    #flatten + 1st fully connected
    with tf.name_scope('fc1') as scope:
        W_fc1 = weight_variable([37 * 1 * 512, 2048])
        b_fc1 = bias_variable([2048])
        h_pool3_flat = tf.reshape(h_pool3, [-1, 37 * 1 * 512])
        h_fc1 = tf.nn.relu(tf.matmul(h_pool3_flat, W_fc1) + b_fc1)
        #ドロップ層の設定
        h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

    #2nd fully connected
    with tf.name_scope('fc2') as scope:
        W_fc2 = weight_variable([2048, NUM_CLASSES])
        b_fc2 = bias_variable([NUM_CLASSES])

    #softmax output
    with tf.name_scope('softmax') as scope:
        y_conv=tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)

    return y_conv

损失

def loss(logits, labels):
    # cross entropy
    cross_entropy = -tf.reduce_sum(labels*tf.log(tf.clip_by_value(logits,1e-10,1.0)))
    # TensorBoard
    tf.scalar_summary("cross_entropy", cross_entropy)
    return cross_entropy

训练

def training(loss, learning_rate):
    train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss)
    return train_step

准确性

def accuracy(logits, labels):
    correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    tf.scalar_summary("accuracy", accuracy)
    return accuracy

主要内容

if __name__ == '__main__':

    flags = tf.app.flags
    FLAGS = flags.FLAGS

    flags.DEFINE_string('train_dir', '/tmp/data', 'Directory to put the training data.')
    flags.DEFINE_integer('max_steps', , 'Number of steps to run trainer.')
    flags.DEFINE_integer('batch_size', 10, 'Batch size'
                         'Must divide evenly into the dataset sizes.')
    flags.DEFINE_float('learning_rate', 1e-4, 'Initial learning rate.')

    #num output
    NUM_CLASSES = 5
    #num frame
    IMAGE_SIZE = 599
    #tensor shape
    IMAGE_PIXELS = IMAGE_SIZE*1*128

    ##################
    #modify the data #
    ##################

    #number of training data
    train_num = 70
    #loading data limit
    data_limit = 100

    flatten_data = []
    flatten_label = []

    # データの整形
    filenames = glob.glob(os.path.join('/Users/kosukefukui/Qosmo/WASABEAT/song_features/*.json'))
    filenames = filenames[0:data_limit]
    print "----loading data---"
    for file_path in filenames:
        data = json.load(open(file_path))
        data = np.array(data)

        for_flat = np.array(data)
        assert for_flat.flatten().shape == (IMAGE_PIXELS,)
        flatten_data.append(for_flat.flatten().tolist())

    # ラベルの整形
    f2 = open("id_information.txt")
    print "---loading labels----"

    for line in f2:
        line = line.rstrip()
        l = line.split(",")
        tmp = np.zeros(NUM_CLASSES)
        tmp[int(l[4])] = 1
        flatten_label.append(tmp)

    flatten_label = flatten_label[0:data_limit]

    print "データ数 %s" % len(flatten_data)
    print "ラベルデータ数 %s" % len(flatten_label)

    #train data
    train_image = np.asarray(flatten_data[0:train_num], dtype=np.float32)
    train_label = np.asarray(flatten_label[0:train_num],dtype=np.float32)

    print "訓練データ数 %s" % len(train_image)

    #test data
    test_image = np.asarray(flatten_data[train_num:data_limit], dtype=np.float32)
    test_label = np.asarray(flatten_label[train_num:data_limit],dtype=np.float32)

    print "テストデータ数 %s" % len(test_image)

    print "599×128 = "
    print len(train_image[0])

    f2.close()

    if 1==1:
        # Image Tensor
        images_placeholder = tf.placeholder("float", shape=(None, IMAGE_PIXELS))
        # Label Tensor
        labels_placeholder = tf.placeholder("float", shape=(None, NUM_CLASSES))
        # dropout Tensor
        keep_prob = tf.placeholder("float")

        # construct model
        logits = inference(images_placeholder, keep_prob)
        # calculate loss
        loss_value = loss(logits, labels_placeholder)
        # training
        train_op = training(loss_value, FLAGS.learning_rate)
        # accuracy
        acc = accuracy(logits, labels_placeholder)

        saver = tf.train.Saver()
        sess = tf.Session()
        sess.run(tf.initialize_all_variables())
        # for TensorBoard
        summary_op = tf.merge_all_summaries()
        summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, sess.graph_def)

        # Training
        for step in range(FLAGS.max_steps):
            for i in range(len(train_image)/FLAGS.batch_size):
                # train for batch_size
                batch = FLAGS.batch_size*i
                sess.run(train_op, feed_dict={
                  images_placeholder: train_image[batch:batch+FLAGS.batch_size],
                  labels_placeholder: train_label[batch:batch+FLAGS.batch_size],
                  keep_prob: 0.5})

            # calculate accuracy at each step
            train_accuracy = sess.run(acc, feed_dict={
                images_placeholder: train_image,
                labels_placeholder: train_label,
                keep_prob: 1.0})
            print "step %d, training accuracy %g"%(step, train_accuracy)

            # add value for Tensorboard at each step
            summary_str = sess.run(summary_op, feed_dict={
                images_placeholder: train_image,
                labels_placeholder: train_label,
                keep_prob:1.0})
            summary_writer.add_summary(summary_str, step)

    # show accuracy for test data
    print "test accuracy %g"%sess.run(acc, feed_dict={
        images_placeholder: test_image,
        labels_placeholder: test_label,
        keep_prob: 1.0})
    # save the last model
    save_path = saver.save(sess, "model.ckpt")

但是,我得到了相同的训练准确率。如何解决这个问题?

step 0, training accuracy 0.142857
step 1, training accuracy 0.142857
step 2, training accuracy 0.142857
step 3, training accuracy 0.142857
step 4, training accuracy 0.142857
step 5, training accuracy 0.142857
step 6, training accuracy 0.142857
step 7, training accuracy 0.142857
step 8, training accuracy 0.142857
step 9, training accuracy 0.142857
test accuracy 0.133333

我引用了以下模型,我的张量板如下。

最佳答案

会不会是您没有最小化正确的张量? 您正在最小化 cross_entropy,但应该是 cross_entropy_mean(代码中的准确性)。

基本上有以下逻辑:

cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
      logits, ground_truth_placeholder)

cross_entropy_mean = tf.reduce_mean(cross_entropy)

train_step = tf.train.GradientDescentOptimizer(FLAGS.learning_rate).minimize(
      cross_entropy_mean)

关于python - Tensorflow 相同的训练精度持续,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/35770360/

有关python - Tensorflow 相同的训练精度持续的更多相关文章

  1. python - 如何使用 Ruby 或 Python 创建一系列高音调和低音调的蜂鸣声? - 2

    关闭。这个问题是opinion-based.它目前不接受答案。想要改进这个问题?更新问题,以便editingthispost可以用事实和引用来回答它.关闭4年前。Improvethisquestion我想在固定时间创建一系列低音和高音调的哔哔声。例如:在150毫秒时发出高音调的蜂鸣声在151毫秒时发出低音调的蜂鸣声200毫秒时发出低音调的蜂鸣声250毫秒的高音调蜂鸣声有没有办法在Ruby或Python中做到这一点?我真的不在乎输出编码是什么(.wav、.mp3、.ogg等等),但我确实想创建一个输出文件。

  2. ruby - 如果指定键的值在数组中相同,如何合并哈希 - 2

    我有一个这样的哈希数组:[{:foo=>2,:date=>Sat,01Sep2014},{:foo2=>2,:date=>Sat,02Sep2014},{:foo3=>3,:date=>Sat,01Sep2014},{:foo4=>4,:date=>Sat,03Sep2014},{:foo5=>5,:date=>Sat,02Sep2014}]如果:date相同,我想合并哈希值。我对上面数组的期望是:[{:foo=>2,:foo3=>3,:date=>Sat,01Sep2014},{:foo2=>2,:foo5=>5:date=>Sat,02Sep2014},{:foo4=>4,:dat

  3. Python 相当于 Perl/Ruby ||= - 2

    这个问题在这里已经有了答案:关闭10年前。PossibleDuplicate:Pythonconditionalassignmentoperator对于这样一个简单的问题表示歉意,但是谷歌搜索||=并不是很有帮助;)Python中是否有与Ruby和Perl中的||=语句等效的语句?例如:foo="hey"foo||="what"#assignfooifit'sundefined#fooisstill"hey"bar||="yeah"#baris"yeah"另外,类似这样的东西的通用术语是什么?条件分配是我的第一个猜测,但Wikipediapage跟我想的不太一样。

  4. java - 什么相当于 ruby​​ 的 rack 或 python 的 Java wsgi? - 2

    什么是ruby​​的rack或python的Java的wsgi?还有一个路由库。 最佳答案 来自Python标准PEP333:Bycontrast,althoughJavahasjustasmanywebapplicationframeworksavailable,Java's"servlet"APImakesitpossibleforapplicationswrittenwithanyJavawebapplicationframeworktoruninanywebserverthatsupportstheservletAPI.ht

  5. 华为OD机试用Python实现 -【明明的随机数】 2023Q1A - 2

    华为OD机试题本篇题目:明明的随机数题目输入描述输出描述:示例1输入输出说明代码编写思路最近更新的博客华为od2023|什么是华为od,od薪资待遇,od机试题清单华为OD机试真题大全,用Python解华为机试题|机试宝典【华为OD机试】全流程解析+经验分享,题型分享,防作弊指南华为o

  6. python - 如何读取 MIDI 文件、更改其乐器并将其写回? - 2

    我想解析一个已经存在的.mid文件,改变它的乐器,例如从“acousticgrandpiano”到“violin”,然后将它保存回去或作为另一个.mid文件。根据我在文档中看到的内容,该乐器通过program_change或patch_change指令进行了更改,但我找不到任何在已经存在的MIDI文件中执行此操作的库.他们似乎都只支持从头开始创建的MIDI文件。 最佳答案 MIDIpackage会为您完成此操作,但具体方法取决于midi文件的原始内容。一个MIDI文件由一个或多个音轨组成,每个音轨是十六个channel中任何一个上的

  7. 「Python|Selenium|场景案例」如何定位iframe中的元素? - 2

    本文主要介绍在使用Selenium进行自动化测试或者任务时,对于使用了iframe的页面,如何定位iframe中的元素文章目录场景描述解决方案具体代码场景描述当我们在使用Selenium进行自动化测试的时候,可能会遇到一些界面或者窗体是使用HTML的iframe标签进行承载的。对于iframe中的标签,如果直接查找是无法找到的,会抛出没有找到元素的异常。比如近在咫尺的例子就是,CSDN的登录窗体就是使用的iframe,大家可以尝试通过F12开发者模式查看到的tag_name,class_name,id或者xpath来定位中的页面元素,会抛出NoSuchElementException异常。解决

  8. jenkins部署1--jenkins+gitee持续集成 - 2

    前置步骤我们都操作完了,这篇开始介绍jenkins的集成。话不多说,看操作1、登录进入jenkins后会让你选择安装插件,选择第一个默认的就行。安装完成后设置账号密码,重新登录。2、配置JDK和Git都需要执行路径,所以需要先把执行路径找到,先进入服务器的docker容器,2.1JDK的路径root@69eef9ee86cf:/usr/bin#echo$JAVA_HOME/usr/local/openjdk-82.2Git的路径root@69eef9ee86cf:/#whichgit/usr/bin/git3、先配置JDK和Git。点击:ManageJenkins>>GlobalToolCon

  9. python ffmpeg 使用 pyav 转换 一组图像 到 视频 - 2

    2022/8/4更新支持加入水印水印必须包含透明图像,并且水印图像大小要等于原图像的大小pythonconvert_image_to_video.py-f30-mwatermark.pngim_dirout.mkv2022/6/21更新让命令行参数更加易用新的命令行使用方法pythonconvert_image_to_video.py-f30im_dirout.mkvFFMPEG命令行转换一组JPG图像到视频时,是将这组图像视为MJPG流。我需要转换一组PNG图像到视频,FFMPEG就不认了。pyav内置了ffmpeg库,不需要系统带有ffmpeg工具因此我使用ffmpeg的python包装p

  10. [工业相机] 分辨率、精度和公差之间的关系 - 2

    📢博客主页:https://blog.csdn.net/weixin_43197380📢欢迎点赞👍收藏⭐留言📝如有错误敬请指正!📢本文由Loewen丶原创,首发于CSDN,转载注明出处🙉📢现在的付出,都会是一种沉淀,只为让你成为更好的人✨文章预览:一.分辨率(Resolution)1、工业相机的分辨率是如何定义的?2、工业相机的分辨率是如何选择的?二.精度(Accuracy)1、像素精度(PixelAccuracy)2、定位精度和重复定位精度(RepeatPrecision)三.公差(Tolerance)四.课后作业(Post-ClassExercises)视觉行业的初学者,甚至是做了1~2年

随机推荐