Something is disastrously wrong with my neural network and what it's produced
I just got a neural network to run and although it doesn't raise any exceptions, I'm left with a horrible mess after 80 to 100 epochs:
After 100 epochs:
I am trying to generate a synthetic image of a cat from my own database of cat photos, which I compiled using a crawler. I am adapting code that was originally written for the MNIST handwritten-digits database (hence the shape of the grid).
The network doesn't appear to be training, generating, or discriminating properly: the epochs finish almost instantly, and what it produces is very poor.
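One check that might explain the very fast epochs (a minimal sketch; the helper name is hypothetical, and the x_train / batch-size names come from the script further down): if x_train ends up empty, batch_count works out to zero, the inner training loop never runs, and the saved grids are just the output of an untrained generator.

import numpy as np

def report_training_set(x, batch_size=128):
    # Hypothetical helper: show how much data the training loop below will actually see.
    x = np.asarray(x)
    print('x_train shape:', x.shape)
    print('batches per epoch:', x.shape[0] // batch_size)  # 0 means the inner loop never executes

# report_training_set(x_train)  # call with x_train as built by the script below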
To be clear, I've tried to adapt another author's code that I found online, and I've added other snippets of code to try to get it to work. It's evident that my 'FrankenNet' has fallen to bits and that my 'bolt it together and see what happens' approach has its limitations. In the future I plan to be more efficient and logical about how I learn Python, because my experimental method has proved to be both time-consuming and unpredictable.
Maybe I haven't loaded the data in correctly, or perhaps there are a few other issues, such as with converting the data to a NumPy array?
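For reference, here is a minimal sketch of how the photos could be read, converted to grayscale, resized, and flattened into the 784-value vectors the network below expects; the 28x28 size, the [-1, 1] scaling (to match the tanh output layer), and the folder path are assumptions on my part rather than something my script already does.

import os
import numpy as np
from PIL import Image

def load_flattened_images(folder, size=(28, 28)):
    # Load every image in `folder`, convert to grayscale, resize, and flatten to size[0]*size[1] values.
    images = []
    for name in sorted(os.listdir(folder)):
        if name == '.DS_Store':
            continue
        img = Image.open(os.path.join(folder, name)).convert('L').resize(size)
        arr = np.asarray(img, dtype=np.float32)
        arr = (arr - 127.5) / 127.5   # scale pixels to [-1, 1] to match the tanh generator output
        images.append(arr.flatten())  # 28 * 28 = 784 values per image
    return np.array(images)

# x_train = load_flattened_images('cats/train')  # hypothetical path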
Because I don't know exactly what is causing this (I have limited programming experience), and because no exceptions are raised when I run the program, I'm posting the entire code below.
I'd love some advice, because I really want to generate something and I've spent a long time trying to work it out through trial and error with no results. I'd especially appreciate specific suggestions about which lines I need to change, add, or remove to get this beast up to scratch.
Thank you for your time!
import os
import numpy as np
from sklearn.model_selection import train_test_split
import matplotlib
matplotlib.use("TkAgg")
from matplotlib import pyplot as plt
from matplotlib.pyplot import imread  # imread is used below but was never imported; matplotlib's imread is assumed here
from tqdm import tqdm
from keras.layers import Input
from keras.models import Model, Sequential
from keras.layers.core import Dense, Dropout
from keras.layers.advanced_activations import LeakyReLU
from keras.optimizers import Adam
from keras import initializers

os.environ["KERAS_BACKEND"] = "tensorflow"  # has no effect here: keras has already been imported above
np.random.seed(10)
random_dim = 100

from os import listdir
from PIL import Image as PImage

def loadImages(path):
    # return a list of PIL images found in `path`
    imagesList = listdir(path)
    loadedImages = []
    for image in imagesList:
        img = PImage.open(path + image)
        loadedImages.append(img)
    return loadedImages

DATASET_NAME = 'cats'
ROOT_DIR = '/Users/Darren/desktop'
DATASET_DIR = f'{ROOT_DIR}/{DATASET_NAME}'

input_files = [os.path.join(dp, f) for dp, dn, fn in
               os.walk(os.path.expanduser(f'{DATASET_DIR}/processed')) for f in fn
               if f != '.DS_Store']

imgs = np.ndarray(shape=(len(input_files), 100, 100, 3),
                  dtype=np.int)
for i, input_file in enumerate(input_files):
    # print('processing file: {}'.format(input_file))
    image = imread(input_file)
    imgs[i] = image

# your images in an array
imgs = loadImages(f'{DATASET_DIR}/processed/')  # `path` was not defined; the processed dataset directory is assumed here

PATH = os.getcwd()
train_path = PATH + '/cats/train'
train_batch = os.listdir(train_path)
x_train = []

# if data are in form of images
img_path = train_path
test_path = PATH + '/cats/test'
test_batch = os.listdir(test_path)
x_test = []

# finally converting list into numpy array
# NOTE: nothing above ever appends image data to x_train or x_test,
# so both end up as empty arrays here.
x_train = np.array(x_train)
x_test = np.array(x_test)

def get_optimizer():
    return Adam(lr=0.0002, beta_1=0.5)

def get_generator(optimizer):
    generator = Sequential()
    generator.add(Dense(256, input_dim=random_dim,
                        kernel_initializer=initializers.RandomNormal(stddev=0.02)))
    generator.add(LeakyReLU(0.2))
    generator.add(Dense(512))
    generator.add(LeakyReLU(0.2))
    generator.add(Dense(1024))
    generator.add(LeakyReLU(0.2))
    generator.add(Dense(784, activation='tanh'))  # 784 = 28*28, i.e. MNIST-sized grayscale output
    generator.compile(loss='binary_crossentropy', optimizer=optimizer)
    return generator

def get_discriminator(optimizer):
    discriminator = Sequential()
    discriminator.add(Dense(1024, input_dim=784,  # expects flattened 28x28 grayscale images
                            kernel_initializer=initializers.RandomNormal(stddev=0.02)))
    discriminator.add(LeakyReLU(0.2))
    discriminator.add(Dropout(0.3))
    discriminator.add(Dense(512))
    discriminator.add(LeakyReLU(0.2))
    discriminator.add(Dropout(0.3))
    discriminator.add(Dense(256))
    discriminator.add(LeakyReLU(0.2))
    discriminator.add(Dropout(0.3))
    discriminator.add(Dense(1, activation='sigmoid'))
    discriminator.compile(loss='binary_crossentropy', optimizer=optimizer)
    return discriminator

def get_gan_network(discriminator, random_dim, generator, optimizer):
    # freeze the discriminator while training the combined generator + discriminator stack
    discriminator.trainable = False
    gan_input = Input(shape=(random_dim,))
    x = generator(gan_input)
    gan_output = discriminator(x)
    gan = Model(inputs=gan_input, outputs=gan_output)
    gan.compile(loss='binary_crossentropy', optimizer=optimizer)
    return gan

def plot_generated_images(epoch, generator, examples=100, dim=(10, 10),
                          figsize=(10, 10)):
    noise = np.random.normal(0, 1, size=[examples, random_dim])
    generated_images = generator.predict(noise)
    generated_images = generated_images.reshape(examples, 28, 28)
    plt.figure(figsize=figsize)
    for i in range(generated_images.shape[0]):
        plt.subplot(dim[0], dim[1], i + 1)
        plt.imshow(generated_images[i], interpolation='nearest', cmap='gray_r')
        plt.axis('off')
    plt.tight_layout()
    plt.savefig('gan_generated_image_epoch_%d.png' % epoch)

def train(epochs=1, batch_size=128):
    batch_count = x_train.shape[0] // batch_size
    adam = get_optimizer()
    generator = get_generator(adam)
    discriminator = get_discriminator(adam)
    gan = get_gan_network(discriminator, random_dim, generator, adam)
    for e in range(1, epochs + 1):
        print('-' * 15, 'Epoch %d' % e, '-' * 15)
        for _ in tqdm(range(batch_count)):
            # train the discriminator on a half-real, half-fake batch
            noise = np.random.normal(0, 1, size=[batch_size, random_dim])
            image_batch = x_train[np.random.randint(0, x_train.shape[0], size=batch_size)]
            generated_images = generator.predict(noise)
            X = np.concatenate([image_batch, generated_images])
            y_dis = np.zeros(2 * batch_size)
            y_dis[:batch_size] = 0.9  # one-sided label smoothing on the real images
            discriminator.trainable = True
            discriminator.train_on_batch(X, y_dis)
            # train the generator through the combined GAN with the discriminator frozen
            noise = np.random.normal(0, 1, size=[batch_size, random_dim])
            y_gen = np.ones(batch_size)
            discriminator.trainable = False
            gan.train_on_batch(noise, y_gen)
        if e == 1 or e % 20 == 0:
            plot_generated_images(e, generator)

if __name__ == '__main__':
    train(400, 128)