Demo entry 6657837

1

   

Submitted by anonymous on Nov 05, 2017 at 07:39
Language: Python 3. Code size: 6.7 kB.

# -*- coding: utf-8 -*-
from skimage import io,transform
import glob
import os
import tensorflow as tf
import numpy as np
import time
import re

def read_images(image_path, image_list):
    """Load and resize the BMP images named in image_list.

    Parameters
    ----------
    image_path : str
        Directory prefix the image base names are appended to.
    image_list : iterable of str
        Image base names (without the ".bmp" extension).

    Returns
    -------
    numpy.ndarray
        float32 array of shape (len(image_list), 160, 200, 3).
    """
    # Bug fix: the original built and sorted os.listdir(image_path) into a
    # local `images_list` that was never used (and shadowed the parameter
    # name) — dead work removed.
    images = []
    for name in image_list:
        # io / transform come from the file-level skimage import.
        img = io.imread(image_path + name + ".bmp")
        img = transform.resize(img, (160, 200, 3))
        images.append(img)
    return np.asarray(images, np.float32)

def read_labels(label_path, label_name_path):
    """Parse subject identifiers and their 10 numeric label fields.

    Parameters
    ----------
    label_path : str
        Text/CSV file containing the numeric labels, 10 values per subject.
    label_name_path : str
        Text file containing subject identifiers of the form "TL<digits>".

    Returns
    -------
    (labels, labels_name)
        labels: float32 array of shape (n, 10);
        labels_name: array of str identifiers, in file order.

    Raises
    ------
    ValueError
        If the number of numeric tokens is not exactly 10 per identifier.
    """
    # Subject names: every token matching "TL" followed by digits.
    # Bug fix: the original never closed label_name_file; `with` closes both
    # files deterministically.  np.str was removed in NumPy 1.24 -> use str.
    with open(label_name_path, 'r') as label_name_file:
        name_content = label_name_file.read()
    labels_name = np.asarray(re.findall(r"TL\d+", name_content), str)

    # Numeric labels: every integer/decimal token, 10 per subject.
    # NOTE(review): the pattern drops any minus sign — presumably all label
    # values are non-negative; confirm against the label file.
    with open(label_path, 'r') as labelfile:
        content = labelfile.read()
    labels = np.asarray(re.findall(r"\d*\.?\d+", content),
                        np.float32).reshape(labels_name.shape[0], 10)

    return labels, labels_name

def creat_gender_labels(labels):
    """One-hot encode the gender column (index 2) of the label matrix.

    Gender code 1 -> [1, 0]; code 2 -> [0, 1].  Any other value is a data
    error: it is reported and the process is terminated, preserving the
    original fail-fast behaviour.

    Returns an int array of shape (n, 2).
    """
    labels_gender = []
    for i in range(labels.shape[0]):
        gender = labels[i][2]
        if gender == 1:
            labels_gender.append([1, 0])
        elif gender == 2:
            labels_gender.append([0, 1])
        else:
            # Bug fix: the original concatenated str + int (TypeError) and
            # reported "age error" for a gender field.
            print("gender error at " + str(i))
            os._exit(0)
    # np.int was removed in NumPy 1.24; the builtin int is the equivalent.
    return np.asarray(labels_gender, int)

def creat_BMI_labels(labels):
    """Z-score-normalize the BMI column (index 0) of the label matrix.

    Each entry becomes (value - mean) / sample standard deviation (ddof=1).

    Returns a float32 array of shape (n, 1).
    """
    bmi = labels[:, 0]
    # Hoisted out of the loop: the original recomputed the column mean and
    # sample std for every row (O(n^2) overall).  Unused labels_BMI_temp
    # removed.
    mean = bmi.sum() / labels.shape[0]
    std = np.std(bmi, ddof=1)
    return np.asarray([[(v - mean) / std] for v in bmi], np.float32)
    
def creat_AGE_labels(labels):
    """Z-score-normalize the age column (index 1) of the label matrix.

    Each entry becomes (value - mean) / sample standard deviation (ddof=1).

    Returns a float32 array of shape (n, 1).
    """
    age = labels[:, 1]
    # Hoisted out of the loop: the original recomputed the column mean and
    # sample std for every row (O(n^2) overall).  Unused labels_AGE_temp
    # removed.
    mean = age.sum() / labels.shape[0]
    std = np.std(age, ddof=1)
    return np.asarray([[(v - mean) / std] for v in age], np.float32)

def ceat_BP_(labels):
    """Z-score-normalize systolic (col 3) and diastolic (col 4) blood pressure.

    Each entry becomes (value - mean) / sample standard deviation (ddof=1)
    of its column.

    Returns (labels_SBP, labels_DBP), each a float32 array of shape (n, 1).
    """
    sbp = labels[:, 3]
    dbp = labels[:, 4]
    n = labels.shape[0]
    # Means and sample stds hoisted out of the per-row loop: the original
    # recomputed all four statistics for every subject (O(n^2) overall).
    sbp_mean, sbp_std = sbp.sum() / n, np.std(sbp, ddof=1)
    dbp_mean, dbp_std = dbp.sum() / n, np.std(dbp, ddof=1)
    labels_SBP = [[(v - sbp_mean) / sbp_std] for v in sbp]
    labels_DBP = [[(v - dbp_mean) / dbp_std] for v in dbp]
    return np.asarray(labels_SBP, np.float32), np.asarray(labels_DBP, np.float32)
# Yield complete minibatches of (images, labels), optionally in shuffled order.
def minibatches(images_list, labels, image_path, batch_size, shuffle=False):
    """Generate (image_batch, label_batch) pairs of size batch_size.

    Images are loaded lazily per batch through read_images.  A trailing
    partial batch is dropped.  With shuffle=True every sample is used at
    most once per pass, in random order.
    """
    assert len(images_list) == len(labels)
    if shuffle:
        order = np.arange(len(images_list))
        np.random.shuffle(order)
    last_start = len(images_list) - batch_size + 1
    for start in range(0, last_start, batch_size):
        if shuffle:
            batch_idx = order[start:start + batch_size]
        else:
            batch_idx = slice(start, start + batch_size)
        yield read_images(image_path, images_list[batch_idx]), labels[batch_idx]

# --- Load labels and split into train/test sets (runs at import time) ---
# Data locations (Windows paths, hard-coded).
image_path = 'd:/2D/imageout400-320/'
labels_path = 'c:/deeplearning/label/label.csv'
labels_name_path = 'c:/deeplearning/label/labels_name'

# Read the label table and build the normalized-age regression targets.
labels, labels_name = read_labels(labels_path, labels_name_path)
labels_AGE = creat_AGE_labels(labels)

# First 2400 subjects train; subjects 2400..2639 test.
split = 2400
train_images_name = labels_name[:split]
train_labels_AGE = labels_AGE[:split]
test_images_name = labels_name[split:2640]
test_labels_AGE = labels_AGE[split:2640]

def weight_variable(shape):
    """Return a trainable weight tensor drawn from a truncated normal (stddev 0.1)."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))

def bias_variable(shape):
    """Return a trainable bias tensor initialised to the constant 0.1."""
    return tf.Variable(tf.constant(0.1, shape=shape))

def conv2d(x, W):
    """2-D convolution of x with kernel W, stride 1, SAME (zero) padding."""
    unit_stride = [1, 1, 1, 1]
    return tf.nn.conv2d(x, W, strides=unit_stride, padding='SAME')

def max_pool_2x2(x):
    """Downsample x by 2 in both spatial dimensions via 2x2 max pooling."""
    window = [1, 2, 2, 1]
    return tf.nn.max_pool(x, ksize=window, strides=window, padding='SAME')

# --- Build the TF1 regression graph: 3 conv layers + 3 FC layers -> 1 value ---
# InteractiveSession lets the script call .eval()/.run() directly below.
sess = tf.InteractiveSession()

# Inputs: batches of 160x200 RGB images and one regression target per image.
x = tf.placeholder("float", shape=[None,160,200,3],name='image_input')
y_ = tf.placeholder("float", shape=[None,1],name='label_input')

# Conv layer 1: 9x9 kernels, 3 -> 32 channels, stride 2, then 2x2 max-pool.
W_conv1 = weight_variable([9, 9, 3, 32])
b_conv1 = bias_variable([32])
x_image = tf.reshape(x, [-1,160,200,3])
h_conv1 = tf.nn.relu(tf.nn.conv2d(x_image, W_conv1, strides=[1, 2, 2, 1], padding='SAME') + b_conv1)#80*100*32
h_pool1 = max_pool_2x2(h_conv1)#40*50*32

# Conv layer 2: 5x5 kernels, 32 -> 64 channels, stride 2, then 2x2 max-pool.
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(tf.nn.conv2d(h_pool1, W_conv2, strides=[1, 2, 2, 1], padding='SAME') + b_conv2)#20*25*64
h_pool2 = max_pool_2x2(h_conv2)#10*13*64

# Conv layer 3: 3x3 kernels, 64 -> 128 channels, stride 1, no pooling.
W_conv3 = weight_variable([3, 3, 64, 128])
b_conv3 = bias_variable([128])
h_conv3 = tf.nn.relu(tf.nn.conv2d(h_pool2, W_conv3, strides=[1, 1, 1, 1], padding='SAME') + b_conv3)#10*13*128

# Dropout keep-probability; fed as 0.8 during training, 1.0 at evaluation.
keep_prob = tf.placeholder("float")
h_conv3_flat = tf.reshape(h_conv3, [-1, 10 * 13 * 128])

# Fully connected layer 1: flattened conv features (10*13*128) -> 512, dropout.
W_fc1 = weight_variable([10 * 13 * 128, 512])
b_fc1 = bias_variable([512])
h_fc1 = tf.nn.relu(tf.matmul(h_conv3_flat, W_fc1) + b_fc1)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

# Fully connected layer 2: 512 -> 64, with dropout.
W_fc2 = weight_variable([512, 64])
b_fc2 = bias_variable([64])
h_fc2 = tf.nn.relu(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
h_fc2_drop = tf.nn.dropout(h_fc2, keep_prob)

# Output: linear 64 -> 1 (regression; no activation).
W_fc3 = weight_variable([64, 1])
b_fc3 = bias_variable([1])
y_conv=tf.matmul(h_fc2_drop, W_fc3) + b_fc3

# Mean squared error over the batch, minimized with Adam (learning rate 1e-5).
loss = tf.reduce_mean(tf.reduce_sum( tf.square(y_ - y_conv), reduction_indices=[1]))
train_step = tf.train.AdamOptimizer(1e-5).minimize(loss)
sess.run(tf.global_variables_initializer())

# Train for 100 epochs over shuffled minibatches of 10 images each.
for epoch in range(100):
    batch_count = 0
    for batch_images, batch_labels in minibatches(train_images_name, train_labels_AGE,
                                                  image_path, 10, shuffle=True):
        batch_count += 1
        # Log the dropout-free training loss every 10 batches.
        if batch_count % 10 == 0:
            train_loss = loss.eval(feed_dict={x: batch_images, y_: batch_labels, keep_prob: 1.0})
            print("step %d loss %.8g" % (epoch, train_loss))
        train_step.run(feed_dict={x: batch_images, y_: batch_labels, keep_prob: 0.8})
   
# --- Evaluate on the held-out test set, one image at a time ---
sumup = []     # per-sample squared errors
correl1 = []   # ground-truth normalized ages
correl2 = []   # predicted ages
for i in range(len(test_images_name)):
    img = io.imread(image_path + test_images_name[i] + ".bmp")
    img = transform.resize(img, (160, 200, 3))
    x_image = [np.asarray(img, np.float32)]
    y_label = [test_labels_AGE[i]]

    # Perf fix: the original ran the graph twice per sample (loss.eval then
    # y_conv.eval), doubling evaluation cost; fetch both in one sess.run.
    each_loss, each_prediction = sess.run(
        [loss, y_conv],
        feed_dict={x: x_image, y_: y_label, keep_prob: 1.0})

    sumup.append(each_loss)
    correl1.append(y_label[0][0])
    correl2.append(each_prediction[0][0])

print("test loss " + str(sum(sumup) / len(sumup)))
# Pearson correlation between truth and prediction (2x2 matrix).
correl = [correl1, correl2]
print("test accuracy " + str(np.corrcoef(correl)))  # fixed "acurracy" typo

This snippet took 0.02 seconds to highlight.

Back to the Entry List or Home.

Delete this entry (admin only).