Convert a tensor to a numpy array

Problem description

I wrote the following code to extract features from two images with a deep CNN using TensorFlow:

# -*- coding: utf-8 -*-
# Implementation of Wang et al 2017: Automatic Brain Tumor Segmentation using Cascaded Anisotropic Convolutional Neural Networks. https://arxiv.org/abs/1709.00382

# Author: Guotai Wang
# Copyright (c) 2017-2018 University College London, United Kingdom. All rights reserved.
# http://cmictig.cs.ucl.ac.uk
#
# Distributed under the BSD-3 licence. Please see the file licence.txt
# This software is not certified for clinical use.
#
from __future__ import absolute_import, print_function
import numpy as np
from scipy import ndimage
import time
import os
import sys
import pickle
import tensorflow as tf
from tensorflow.contrib.data import Iterator
from util.data_loader import *
from util.data_process import *
from util.train_test_func import *
from util.parse_config import parse_config
from train import NetFactory
print("import finished")
def test(config_file):
    # 1, load configure file
    config = parse_config(config_file)
    config_data = config['data']
    config_net1 = config.get('network1', None)
    config_net2 = config.get('network2', None)
    config_net3 = config.get('network3', None)
    config_test = config['testing']  
    batch_size  = config_test.get('batch_size', 5)
    print("configure file loaded")

    # 2.1, network for whole tumor
    if(config_net1):
        net_type1    = config_net1['net_type']
        net_name1    = config_net1['net_name']
        data_shape1  = config_net1['data_shape']
        label_shape1 = config_net1['label_shape']
        class_num1   = config_net1['class_num']
        print("configure file of whole tumor is loaded")

        # construct graph for 1st network
        full_data_shape1 = [batch_size] + data_shape1
        x1 = tf.placeholder(tf.float32, shape = full_data_shape1)          
        net_class1 = NetFactory.create(net_type1)
        net1 = net_class1(num_classes = class_num1,w_regularizer = None,
                    b_regularizer = None, name = net_name1)
        net1.set_params(config_net1)
        predicty1, caty1 = net1(x1, is_training = True)
        proby1 = tf.nn.softmax(predicty1)
    else:
        config_net1ax = config['network1ax']
        config_net1sg = config['network1sg']
        config_net1cr = config['network1cr']
        print("configure files of whole tumor in three planes are loaded")

        # construct graph for 1st network axial
        net_type1ax    = config_net1ax['net_type']
        net_name1ax    = config_net1ax['net_name']
        data_shape1ax  = config_net1ax['data_shape']
        label_shape1ax = config_net1ax['label_shape']
        class_num1ax   = config_net1ax['class_num']

        full_data_shape1ax = [batch_size] + data_shape1ax
        x1ax = tf.placeholder(tf.float32, shape = full_data_shape1ax)          
        net_class1ax = NetFactory.create(net_type1ax)
        net1ax = net_class1ax(num_classes = class_num1ax,w_regularizer = None,
                    b_regularizer = None, name = net_name1ax)
        net1ax.set_params(config_net1ax)
        predicty1ax, caty1ax = net1ax(x1ax, is_training = True)
        proby1ax = tf.nn.softmax(predicty1ax)
        print("graph for 1st network1ax is constructed")

        # construct graph for 1st network sagittal
        net_type1sg    = config_net1sg['net_type']
        net_name1sg    = config_net1sg['net_name']
        data_shape1sg  = config_net1sg['data_shape']
        label_shape1sg = config_net1sg['label_shape']
        class_num1sg   = config_net1sg['class_num']

        full_data_shape1sg = [batch_size] + data_shape1sg
        x1sg = tf.placeholder(tf.float32, shape = full_data_shape1sg)          
        net_class1sg = NetFactory.create(net_type1sg)
        net1sg = net_class1sg(num_classes = class_num1sg,w_regularizer = None,
                    b_regularizer = None, name = net_name1sg)
        net1sg.set_params(config_net1sg)
        predicty1sg, caty1sg = net1sg(x1sg, is_training = True)
        proby1sg = tf.nn.softmax(predicty1sg)
        print("graph for 1st network1sg is constructed")

        # construct graph for 1st network coronal
        net_type1cr    = config_net1cr['net_type']
        net_name1cr    = config_net1cr['net_name']
        data_shape1cr  = config_net1cr['data_shape']
        label_shape1cr = config_net1cr['label_shape']
        class_num1cr   = config_net1cr['class_num']

        full_data_shape1cr = [batch_size] + data_shape1cr
        x1cr = tf.placeholder(tf.float32, shape = full_data_shape1cr)          
        net_class1cr = NetFactory.create(net_type1cr)
        net1cr = net_class1cr(num_classes = class_num1cr,w_regularizer = None,
                    b_regularizer = None, name = net_name1cr)
        net1cr.set_params(config_net1cr)
        predicty1cr, caty1cr = net1cr(x1cr, is_training = True)
        proby1cr = tf.nn.softmax(predicty1cr)
        print("graph for 1st network1cr is constructed")

    # 3, create session and load trained models
    all_vars = tf.global_variables()
    sess = tf.InteractiveSession()   
    sess.run(tf.global_variables_initializer())  
    if(config_net1):
        net1_vars = [x for x in all_vars if x.name[0:len(net_name1) + 1]==net_name1 + '/']
        saver1 = tf.train.Saver(net1_vars)
        saver1.restore(sess, config_net1['model_file'])
    else:
        net1ax_vars = [x for x in all_vars if x.name[0:len(net_name1ax) + 1]==net_name1ax + '/']
        saver1ax = tf.train.Saver(net1ax_vars)
        saver1ax.restore(sess, config_net1ax['model_file'])
        net1sg_vars = [x for x in all_vars if x.name[0:len(net_name1sg) + 1]==net_name1sg + '/']
        saver1sg = tf.train.Saver(net1sg_vars)
        saver1sg.restore(sess, config_net1sg['model_file'])     
        net1cr_vars = [x for x in all_vars if x.name[0:len(net_name1cr) + 1]==net_name1cr + '/']
        saver1cr = tf.train.Saver(net1cr_vars)
        saver1cr.restore(sess, config_net1cr['model_file'])
        print("all variables of net1 is saved")

    # 4, load test images
    dataloader = DataLoader(config_data)
    dataloader.load_data()
    image_num = dataloader.get_total_image_number()

    # 5, start to test
    test_slice_direction = config_test.get('test_slice_direction', 'all')
    save_folder = config_data['save_folder']
    test_time = []
    struct = ndimage.generate_binary_structure(3, 2)
    margin = config_test.get('roi_patch_margin', 5)

    x=['x1','x2']
    paddings=tf.constant([[0,0],[0,0],[10,10],[0,0],[0,0]])
    for i in range(image_num):
        [temp_imgs, temp_weight, temp_name, img_names, temp_bbox, temp_size] = dataloader.get_image_data_with_name(i)
        t0 = time.time()
        # 5.1, test of 1st network
        if(config_net1):
            data_shapes  = [ data_shape1[:-1],  data_shape1[:-1],  data_shape1[:-1]]
            label_shapes = [label_shape1[:-1], label_shape1[:-1], label_shape1[:-1]]
            nets = [net1, net1, net1]
            outputs = [proby1, proby1, proby1]
            inputs =  [x1, x1, x1]
            class_num = class_num1
        else:
            data_shapes  = [ data_shape1ax[:-1],  data_shape1sg[:-1],  data_shape1cr[:-1]]
            label_shapes = [label_shape1ax[:-1], label_shape1sg[:-1], label_shape1cr[:-1]]
            nets = [net1ax, net1sg, net1cr]
            outputs = [proby1ax, proby1sg, proby1cr]
            inputs =  [x1ax, x1sg, x1cr]
            class_num = class_num1ax
        predi=tf.concat([predicty1ax,tf.reshape(predicty1sg,[5,11,180,160,2]),tf.pad(predicty1cr,paddings,"CONSTANT")],0)
        cati=tf.concat([caty1ax,tf.reshape(caty1sg,[5,11,180,160,14]),tf.pad(caty1cr,paddings,"CONSTANT")],0)
        prob1 = test_one_image_three_nets_adaptive_shape(temp_imgs, data_shapes, label_shapes, data_shape1ax[-1], class_num,
                   batch_size, sess, nets, outputs, inputs, shape_mode = 0)
        pred1 =  np.asarray(np.argmax(prob1, axis = 3), np.uint16)
        pred1 = pred1 * temp_weight
        print("net1 is tested")
        globals()[x[i]]=predi
        test_time.append(time.time() - t0)
        print(temp_name)
    test_time = np.asarray(test_time)
    print('test time', test_time.mean())
    np.savetxt(save_folder + '/test_time.txt', test_time)

if __name__ == '__main__':
    if(len(sys.argv) != 2):
        print('Number of arguments should be 2. e.g.')
        print('    python test.py config17/test_all_class.txt')
        exit()
    config_file = str(sys.argv[1])
    assert(os.path.isfile(config_file))
    test(config_file)
    y=tf.stack([x1,x2],0)
    z=tf.Session().run(y)

The output is a tensor (y) and I want to convert it to a numpy array with tf.Session().run(), but I get this error:

InvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'Placeholder' with dtype float and shape [5,19,180,160,4]
	 [[Node: Placeholder = Placeholder[dtype=DT_FLOAT, shape=[5,19,180,160,4], _device="/job:localhost/replica:0/task:0/device:GPU:0"]()]]

Tags: numpy, tensorflow

Solution


Note: this answer is based on a deep look into the crystal ball, guessing at the parts of the code that seem to be classified, since they are not shown in the question itself.

Look at the error message:

InvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor

This is exactly what is wrong with your code. Trimmed down, your code is essentially just the following (and it has a lot of issues):

import tensorflow as tf

x1 = tf.placeholder(tf.float32, [None, 3])
y = tf.layers.dense(x1, 2)

sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())

print(tf.Session().run(y))

The output tensor y cannot be evaluated without knowing the value of x1, because it depends on that value.
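For contrast, a tensor that does not depend on any placeholder can be fetched right away. A minimal sketch (the constant c is only illustrative, it is not part of the question's code):

import tensorflow as tf

x1 = tf.placeholder(tf.float32, [None, 3])
y = tf.layers.dense(x1, 2)
c = tf.reduce_sum(tf.ones([2, 3]))           # does not depend on any placeholder

sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())

print(sess.run(c))   # works, prints 6.0
print(sess.run(y))   # fails: y depends on the unfed placeholder x1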

1. Fix: use proper naming

import tensorflow as tf

x1 = tf.placeholder(tf.float32, [None, 3], name='my_input')
y = tf.layers.dense(x1, 2, name='fc1')

sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())

print(tf.Session().run(y))

Now the error message becomes much clearer:

tensorflow.python.framework.errors_impl.InvalidArgumentError: You must feed a value for placeholder tensor 'my_input' with dtype float and shape [?,3]

2. Fix: provide a feed_dict

To let TensorFlow know which value the computation of y should be based on, you need to feed it into the graph:

import tensorflow as tf

x1 = tf.placeholder(tf.float32, [None, 3], name='my_input')
y = tf.layers.dense(x1, 2, name='fc1')

sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
np_result = tf.Session().run(y, feed_dict={x1: [[42, 43, 44]]})

Now this reveals a second problem in your code. You have 2 sessions:

  • sess = tf.InteractiveSession() (session_a)
  • tf.Session() inside tf.Session().run() (session_b)

Now, session_a is the one that gets all the initialized variables, since your code contains

sess.run(tf.global_variables_initializer())

But tf.Session().run(...) creates another session, which leaves you with a new error message:

FailedPreconditionError (see above for traceback): Attempting to use uninitialized value ...
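Reduced to a minimal sketch (same illustrative names as the snippets above), the two-session mistake looks like this:

import tensorflow as tf

x1 = tf.placeholder(tf.float32, [None, 3], name='my_input')
y = tf.layers.dense(x1, 2, name='fc1')

sess = tf.InteractiveSession()                # session_a
sess.run(tf.global_variables_initializer())  # variables are initialized in session_a only

# tf.Session() creates session_b, which never ran the initializer,
# so the kernel and bias of fc1 are uninitialized there:
print(tf.Session().run(y, feed_dict={x1: [[42, 43, 44]]}))   # FailedPreconditionError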

3. Fix: use only one session

import tensorflow as tf

x1 = tf.placeholder(tf.float32, [None, 3], name='my_input')
y = tf.layers.dense(x1, 2, name='fc1')

sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
np_result = sess.run(y, feed_dict={x1: [[42, 43, 44]]})

And to give you the best solution:

import tensorflow as tf

# construct graph somewhere
x1 = tf.placeholder(tf.float32, [None, 3], name='my_input')
y = tf.layers.dense(x1, 2, name='fc1')

with tf.Session() as sess:
    # init variables / or load them
    sess.run(tf.global_variables_initializer())
    # make sure that no operations will be added to the graph
    sess.graph.finalize()

    # fetch result as numpy array
    np_result = sess.run(y, feed_dict={x1: [[42, 43, 44]]})
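A note on sess.graph.finalize(): it marks the graph as read-only, so any later attempt to add an op raises an error immediately. That is exactly the safeguard your code would benefit from, because tf.concat and tf.pad are called inside the for loop over images, silently adding new nodes to the graph on every iteration, which makes each iteration slower and keeps growing memory.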

The code you wrote yourself, or copied from somewhere, is the best demonstration of "how not to write TensorFlow code".

A final remark:

TensorFlow forces you to create a clean structure. This is important. Following this structure should become a habit. After a while, you will immediately spot the parts that smell like bad code.

If you use an entire network, just replace tf.layers.dense with my_network_definition, as in

def my_network_definition(x1):
    output = ...
    return output
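Applied to your setting, the same single-session pattern would look roughly like the sketch below. Treat it as an outline under assumptions: my_network_definition stands in for whatever NetFactory builds, 'model_file' and the [5, 19, 180, 160, 4] placeholder shape are placeholders taken from your config and error message, and load_batches is a hypothetical helper that yields numpy batches.

import numpy as np
import tensorflow as tf

# build the graph once, outside any loop
x1 = tf.placeholder(tf.float32, [5, 19, 180, 160, 4], name='my_input')
predict_y = my_network_definition(x1)     # your network, e.g. created via NetFactory
prob_y = tf.nn.softmax(predict_y)

saver = tf.train.Saver()                  # or a Saver restricted to that network's variables

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver.restore(sess, 'model_file')     # placeholder path, use the value from your config
    sess.graph.finalize()                 # from here on, no ops can be added to the graph

    results = []
    for batch in load_batches():          # hypothetical loader yielding numpy arrays
        # everything fetched by sess.run is already a numpy array
        np_prob = sess.run(prob_y, feed_dict={x1: batch})
        results.append(np_prob)

    z = np.stack(results, axis=0)         # stack in numpy instead of tf.stack plus a second session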

In PyTorch, you could write things in the arbitrary style given in the question. Not saying you should do that, but it is possible. So please try to follow the structure TensorFlow expects from you.

Dear PyTorch users, I am looking forward to your feedback.
