请帮助我? 如何将准确性数据写入TensorBoard?
如何将准确性数据写入TensorBoard?
#---------------------------------------------------------------------------------------------------
# def train ----------------------------------------------------------------------------------------
# --------------------------------------------------------------------------------------------------
def train(learn_rate,report_steps,mega_report_steps,batch_size,initial_weights=None):
x,y,params = model.get_training_model()
y_ = tf.placeholder(tf.float32,[None,7 * len(common.CHARS) + 1],name='y-input')
# drop out
with tf.name_scope('dropout'):
keep_prob = tf.placeholder(tf.float32)
tf.summary.scalar('dropout_keep_probability',keep_prob)
print('\nTraining phase initiated.\n')
#use adamoptimizer to the training step
digits_loss,presence_loss,loss = get_loss(y,y_)
# tensorboard: with tf.name_scope("train"):
with tf.name_scope("train"):
train_step = tf.train.AdamOptimizer(learn_rate).minimize(loss)
# accuracy best
with tf.name_scope('best'):
best = tf.argmax(tf.reshape(y[:,1:],[-1,7,len(common.CHARS)]),2)
tf.summary.scalar(“ best”,best)#-TensorBoard的最佳准确性?
with tf.name_scope('correct'):
correct = tf.argmax(tf.reshape(y_[:,2)
tf.summary.scalar(“正确”,正确)#-在这里,对TensorBoard的准确性是否正确?
print('cnn succesfully built.')
if initial_weights is not None:
assert len(params) == len(initial_weights)
assign_ops = [w.assign(v) for w,v in zip(params,initial_weights)]
init = tf.global_variables_initializer()
def vec_to_plate(v):
return "".join(common.CHARS[i] for i in v)
do_report()
def do_report():
r = sess.run([best,correct,tf.greater(y[:,0],0),y_[:,digits_loss,loss],feed_dict={x: test_xs,y_: test_ys})
num_correct = numpy.sum(
numpy.logical_or(
numpy.all(r[0] == r[1],axis=1),numpy.logical_and(r[2] < 0.5,r[3] < 0.5)))
r_short = (r[0][:batch_size],r[1][:batch_size],r[2][:batch_size],r[3][:batch_size])
for b,c,pb,pc in zip(*r_short):
print("{} {} <-> {} {}".format(vec_to_plate(c),pc,vec_to_plate(b),float(pb)))
num_p_correct = numpy.sum(r[2] == r[3])
print("batch {:3d} correct: {:2.02f}% presence: {:02.02f}% ".format(
batch_idx,100. * num_correct / (len(r[0])),100. * num_p_correct / len(r[2])))
print("loss: {} (digits: {},presence: {})".format(r[6],r[4],r[5]))
print("|{}|".format(
"".join("X "[numpy.array_equal(b,c) or (not pb and not pc)] for b,pc in
zip(*r_short))))
train_summary,_ = sess.run([merged,correct],feed_dict={x: batch_xs,y_: batch_ys,keep_prob:
1.0}) #train_accuracy
train_writer.add_summary(train_summary,batch_idx)
#print("step %s,training accuracy %s" % (batch_idx,train_accuracy))
test_summary,y_: test_ys,keep_prob: 1.0}) #test_accuracy
test_writer.add_summary(test_summary,batch_idx)
#print("test accuracy %g" % test_accuracy)
# merge all TensorBoard outputs
merged = tf.summary.merge_all()
do_batch()
def do_batch():
sess.run(train_step,y_: batch_ys})
if batch_idx % report_steps == 0: # ehlel
do_report()
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.96) ## memory growth
with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
sess.run(init)
if initial_weights is not None:
sess.run(assign_ops)
test_xs,test_ys = unzip(list(read_data("generated_plates/*.jpg"))[:batch_size])
try:
start_time = time.time()
last_batch_idx = 0
last_batch_time = time.time()
batch_iter = enumerate(read_batches(batch_size))
for batch_idx,(batch_xs,batch_ys) in batch_iter:
do_batch()
if batch_idx % report_steps == 0: # report_step =10 bga
batch_time = time.time()
if last_batch_idx != batch_idx:
time_for_batches = (100 * (last_batch_time - batch_time) /
(last_batch_idx - batch_idx))
print("time for 100 batches {}".format(time_for_batches))
print("now: ",time.strftime("%Y-%m-%d %H:%M:%S",time.localtime()))
elapsed_time = time.time() - start_time
print("Total train time: ",time.strftime("%H:%M:%S",time.gmtime(
elapsed_time))) # total train hugatsaa toologch nemsen
last_batch_idx = batch_idx
last_batch_time = batch_time
# output metadata at 1000 epochs
if batch_idx % mega_report_steps == 0:
print('TensorBoard Adding run metadata for epoch ' + str(batch_idx))
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.Runmetadata()
summary,train_step],keep_prob: 1.0},options=run_options,run_metadata=run_metadata)
train_writer.add_run_metadata(run_metadata,'step%03d' % batch_idx)
train_writer.add_summary(summary,batch_idx)
except KeyboardInterrupt:
last_weights = [p.eval() for p in params]
numpy.savez("TW.npz",*last_weights)
# output metadata at end of epochs
print('Train ending ... TensorBoard Adding run metadata for epoch ' + str(batch_idx))
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.Runmetadata()
summary,run_metadata=run_metadata)
train_writer.add_run_metadata(run_metadata,'step%03d' % batch_idx)
train_writer.add_summary(summary,batch_idx)
# prepare checkpoint writer
saver = tf.compat.v1.train.Saver() # no variable to save baisan end avchirch zassan
print('\nSaving model at ' + str(batch_idx) + ' epochs.')
saver.save(sess,output_directory + "/model_at_" + str(batch_idx) + "_epochs.ckpt",global_step=batch_idx)
# close writers
train_writer.close()
test_writer.close()
# end graph nemsen
graph_writer.add_graph(sess.graph)
graph_writer.close()
print('\nRun " tensorboard --logdir=' + output_directory + '
" to see result on localhost:6006')
return last_weights
请帮助我? 如何将准确性数据写入TensorBoard?
错误:
Original stack trace for 'correct/correct':
File "training.py",line 414,in <module>
initial_weights=initial_weights)
File "training.py",line 181,in train
tf.summary.scalar("correct",correct)
File "C:\Users\EL\Anaconda3\envs\gputest\lib\site-
packages\tensorflow\python\summary\summary.py",line 82,in scalar
val = _gen_logging_ops.scalar_summary(tags=tag,values=tensor,name=scope)
File "C:\Users\EL\Anaconda3\envs\gputest\lib\site-
packages\tensorflow\python\ops\gen_logging_ops.py",line 777,in
scalar_summary
"ScalarSummary",tags=tags,values=values,name=name)
File "C:\Users\EL\Anaconda3\envs\gputest\lib\site-
packages\tensorflow\python\framework\op_def_library.py",line 788,in
_apply_op_helper
op_def=op_def)
File "C:\Users\EL\Anaconda3\envs\gputest\lib\site-
packages\tensorflow\python\util\deprecation.py",line 507,in new_func
return func(*args,**kwargs)
File "C:\Users\EL\Anaconda3\envs\gputest\lib\site-
packages\tensorflow\python\framework\ops.py",line 3616,in create_op
op_def=op_def)
File "C:\Users\EL\Anaconda3\envs\gputest\lib\site-
packages\tensorflow\python\framework\ops.py",line 2005,in __init__
self._traceback = tf_stack.extract_stack()