• Size: 8KB
    File type: .py
    Coins: 1
    Downloads: 0
    Published: 2021-05-27
  • Language: Python
  • Tags: CNN

Resource description

Source code for a CNN template for galaxy image classification, implemented in Python using the TensorFlow framework.
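
Note: the code below uses the TensorFlow 1.x graph API (tf.placeholder). If only TensorFlow 2 is installed, it can usually still be run through the v1 compatibility layer, for example (this shim is not part of the original file):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()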

Code snippet and file information

# coding=utf-8

import os
import pickle
import random

import numpy as np
import tensorflow as tf
from PIL import Image

# Size of each training batch.
BATCH_SIZE = 40
# One channel: the images are grayscale.
NUM_CHANNELS = 1
# Random seed used for initialization.
SEED = 42223

IMAGE_SIZE = 64

NUM_LABELS = 2


class_num = 2
img1_num = 10000
img2_num = 10000
img1_test_num = 1000
img2_test_num = 1000
train_num = img1_num + img2_num
test_total_num = img1_test_num + img2_test_num

image_height = 64
image_width = 64
image_channel = 1

data_path1 = '/home/skywalker/桌面/cnn/1.1'
data_path2 = '/home/skywalker/桌面/cnn/2.1'
# data_path3 = '/Users/xinruyue/Desktop/python_test/3'
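# Assumption based on the counts above: each data_path directory holds the
# grayscale images of one class, with at least img1_num + img1_test_num
# (i.e. 11,000) files per directory.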

def ImageToMatrix(filename):
    # Load an image, convert it to grayscale and return it as a (height, width) matrix.
    im = Image.open(filename)
    width, height = im.size
    im = im.convert("L")
    data = im.getdata()
    data = np.matrix(data)
    # new_data = np.reshape(data, (width, height))
    new_data = np.reshape(data, (height, width))
    return new_data

for root, dirs, files in os.walk(data_path1):
    f1 = files
img1 = []
for each in f1:
    path = os.path.join(data_path1, each)
    img1.append(ImageToMatrix(path))
print(len(img1))

for root, dirs, files in os.walk(data_path2):
    f2 = files
img2 = []
for each in f2:
    path = os.path.join(data_path2, each)
    img2.append(ImageToMatrix(path))
print(len(img2))

'''
for root, dirs, files in os.walk(data_path3):
    f3 = files
img3 = []
for each in f3:
    path = os.path.join(data_path3, each)
    img3.append(ImageToMatrix(path))
'''

dummy_train_data = img1[:img1_num] + img2[:img2_num]

dummy_train_labels = np.zeros((train_num, class_num))
dummy_train_labels[:img1_num, 0] = 1
dummy_train_labels[img2_num:, 1] = 1
# dummy_train_labels[3000:, 2] = 1

data_label_pair = list(zip(dummy_train_data, dummy_train_labels))
random.shuffle(data_label_pair)

train_data_temp = list(zip(*data_label_pair))[0]
train_labels_temp = list(zip(*data_label_pair))[1]
print(len(train_data_temp))
train_data = np.array(train_data_temp).reshape((train_num, image_height, image_width, image_channel)).astype(np.float32)
train_labels = np.array(train_labels_temp)
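# Resulting shapes: train_data is (train_num, 64, 64, 1) float32 and
# train_labels is (train_num, 2) with one-hot rows.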

train_size = train_labels.shape[0]

# Prepare test data and labels.
dummy_test_data = img1[img1_num:img1_num + img1_test_num] + img2[img2_num:img2_num + img2_test_num]

dummy_test_labels = np.zeros((test_total_num, class_num))
dummy_test_labels[:img1_test_num, 0] = 1
dummy_test_labels[img2_test_num:, 1] = 1
# dummy_test_labels[400:, 2] = 1

test_data_label_pair = list(zip(dummy_test_data, dummy_test_labels))
random.shuffle(test_data_label_pair)

test_data_temp = list(zip(*test_data_label_pair))[0]
test_labels_temp = list(zip(*test_data_label_pair))[1]
print(len(test_data_temp))
test_data = np.array(test_data_temp).reshape((test_total_num, image_height, image_width, image_channel)).astype(np.float32)
test_labels = np.array(test_labels_temp)

train_data_node = tf.placeholder(
    tf.float32,
    shape=(BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS))
train_labels_node = tf.placeholder(
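
The snippet is cut off here, before the network itself is defined. As a rough sketch only (the layer sizes, variable names, and loss below are assumptions, not the original author's code), a TensorFlow 1.x-style convolutional model consistent with the constants above might continue like this:

# Hypothetical continuation -- everything below is an illustrative sketch.
train_labels_node = tf.placeholder(tf.float32, shape=(BATCH_SIZE, NUM_LABELS))

# One convolutional stage (assumed 32 filters of 5x5) plus a linear classifier.
conv1_weights = tf.Variable(
    tf.truncated_normal([5, 5, NUM_CHANNELS, 32], stddev=0.1, seed=SEED))
conv1_biases = tf.Variable(tf.zeros([32]))
fc_weights = tf.Variable(
    tf.truncated_normal([(IMAGE_SIZE // 2) * (IMAGE_SIZE // 2) * 32, NUM_LABELS],
                        stddev=0.1, seed=SEED))
fc_biases = tf.Variable(tf.zeros([NUM_LABELS]))

def model(data):
    # conv -> ReLU -> 2x2 max-pool -> flatten -> fully connected logits
    conv = tf.nn.conv2d(data, conv1_weights, strides=[1, 1, 1, 1], padding='SAME')
    relu = tf.nn.relu(tf.nn.bias_add(conv, conv1_biases))
    pool = tf.nn.max_pool(relu, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    flat = tf.reshape(pool, [BATCH_SIZE, -1])
    return tf.matmul(flat, fc_weights) + fc_biases

logits = model(train_data_node)
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=train_labels_node, logits=logits))

A tf.train optimizer (e.g. tf.train.MomentumOptimizer) and a session loop feeding mini-batches of train_data and train_labels into the placeholders would then complete the training script.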
