Import the matplotlib module:
import matplotlib
Check which backends your installed version supports:
print(matplotlib.rcsetup.all_backends)
Output:
['GTK3Agg', 'GTK3Cairo', 'MacOSX', 'nbAgg', 'Qt4Agg', 'Qt4Cairo', 'Qt5Agg', 'Qt5Cairo', 'TkAgg', 'TkCairo', 'WebAgg', 'WX', 'WXAgg', 'WXCairo', 'agg', 'cairo', 'pdf', 'pgf', 'ps', 'svg', 'template']
Check which matplotlibrc file is currently in use:
print(matplotlib.matplotlib_fname())
Output:
D:\ProgramData\Anaconda2\lib\site-packages\matplotlib\mpl-data\matplotlibrc
Open matplotlibrc and inspect its contents.
Change the backend to TkAgg:
Run the following code:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from keras.models import Model
from keras.layers import Dense, Activation, Input, Reshape
from keras.layers import Conv1D, Flatten, Dropout
from keras.optimizers import SGD, Adam

def sample_data(n_samples=10000, x_vals=np.arange(0, 5, .1), max_offset=100, mul_range=[1, 2]):
    vectors = []
    for i in range(n_samples):
        offset = np.random.random() * max_offset
        mul = mul_range[0] + np.random.random() * (mul_range[1] - mul_range[0])
        vectors.append(np.sin(offset + x_vals * mul) / 2 + .5)
    return np.array(vectors)

ax = pd.DataFrame(np.transpose(sample_data(5))).plot()
plt.show()
The resulting plot:
Run the following code:
def get_generative(G_in, dense_dim=200, out_dim=50, lr=1e-3):
    x = Dense(dense_dim)(G_in)
    x = Activation('tanh')(x)
    G_out = Dense(out_dim, activation='tanh')(x)
    G = Model(G_in, G_out)
    opt = SGD(lr=lr)
    G.compile(loss='binary_crossentropy', optimizer=opt)
    return G, G_out

G_in = Input(shape=[10])
G, G_out = get_generative(G_in)
G.summary()
Output:
Run the following code:
def get_discriminative(D_in, lr=1e-3, drate=.25, n_channels=50, conv_sz=5, leak=.2):
    x = Reshape((-1, 1))(D_in)
    x = Conv1D(n_channels, conv_sz, activation='relu')(x)
    x = Dropout(drate)(x)
    x = Flatten()(x)
    x = Dense(n_channels)(x)
    D_out = Dense(2, activation='sigmoid')(x)
    D = Model(D_in, D_out)
    dopt = Adam(lr=lr)
    D.compile(loss='binary_crossentropy', optimizer=dopt)
    return D, D_out

D_in = Input(shape=[50])
D, D_out = get_discriminative(D_in)
D.summary()
Output:
Run the following code:
def set_trainability(model, trainable=False):
    model.trainable = trainable
    for layer in model.layers:
        layer.trainable = trainable

def make_gan(GAN_in, G, D):
    set_trainability(D, False)
    x = G(GAN_in)
    GAN_out = D(x)
    GAN = Model(GAN_in, GAN_out)
    GAN.compile(loss='binary_crossentropy', optimizer=G.optimizer)
    return GAN, GAN_out

GAN_in = Input([10])
GAN, GAN_out = make_gan(GAN_in, G, D)
GAN.summary()
Output:
Run the following code:
def sample_data_and_gen(G, noise_dim=10, n_samples=10000):
    XT = sample_data(n_samples=n_samples)
    XN_noise = np.random.uniform(0, 1, size=[n_samples, noise_dim])
    XN = G.predict(XN_noise)
    X = np.concatenate((XT, XN))
    y = np.zeros((2*n_samples, 2))
    y[:n_samples, 1] = 1
    y[n_samples:, 0] = 1
    return X, y

def pretrain(G, D, noise_dim=10, n_samples=10000, batch_size=32):
    X, y = sample_data_and_gen(G, n_samples=n_samples, noise_dim=noise_dim)
    set_trainability(D, True)
    D.fit(X, y, epochs=1, batch_size=batch_size)

pretrain(G, D)
Output:
Epoch 1/1
   32/20000 [..............................] - ETA: 6:42 - loss: 0.7347
  ...
20000/20000 [==============================] - 5s 236us/step - loss: 0.0097
Import the module:
from tqdm import tqdm_notebook as tqdm
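Note that tqdm_notebook renders a Jupyter widget, which is why the output further below begins with an HBox(...) line. When running the script outside a notebook (for example from a terminal with the TkAgg backend), the plain text progress bar is a drop-in alternative; a minimal sketch:

# When running outside a Jupyter notebook, the standard text progress bar works instead:
from tqdm import tqdm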
Run the following code:
def sample_noise(G, noise_dim=10, n_samples=10000):
    X = np.random.uniform(0, 1, size=[n_samples, noise_dim])
    y = np.zeros((n_samples, 2))
    y[:, 1] = 1
    return X, y

def train(GAN, G, D, epochs=200, n_samples=10000, noise_dim=10, batch_size=32, verbose=False, v_freq=50):
    d_loss = []
    g_loss = []
    e_range = range(epochs)
    if verbose:
        e_range = tqdm(e_range)
    for epoch in e_range:
        X, y = sample_data_and_gen(G, n_samples=n_samples, noise_dim=noise_dim)
        set_trainability(D, True)
        d_loss.append(D.train_on_batch(X, y))

        X, y = sample_noise(G, n_samples=n_samples, noise_dim=noise_dim)
        set_trainability(D, False)
        g_loss.append(GAN.train_on_batch(X, y))
        if verbose and (epoch + 1) % v_freq == 0:
            print("Epoch #{}: Generative Loss: {}, Discriminative Loss: {}".format(epoch + 1, g_loss[-1], d_loss[-1]))
    return d_loss, g_loss

d_loss, g_loss = train(GAN, G, D, verbose=True)
Output:
HBox(children=(IntProgress(value=0, max=200), HTML(value='')))
Epoch #50: Generative Loss: 5.842154026031494, Discriminative Loss: 0.4683375060558319
Epoch #100: Generative Loss: 3.4111320972442627, Discriminative Loss: 0.13123030960559845
Epoch #150: Generative Loss: 5.5205817222595215, Discriminative Loss: 0.03762095794081688
Epoch #200: Generative Loss: 4.994686603546143, Discriminative Loss: 0.045186348259449005
Run the following code:
ax = pd.DataFrame(
    {
        'Generative Loss': g_loss,
        'Discriminative Loss': d_loss,
    }
).plot(title='Training loss', logy=True)
ax.set_xlabel("Epochs")
ax.set_ylabel("Loss")
plt.show()
The resulting plot:
Run the following code:
N_VIEWED_SAMPLES = 2
data_and_gen, _ = sample_data_and_gen(G, n_samples=N_VIEWED_SAMPLES)
pd.DataFrame(np.transpose(data_and_gen[N_VIEWED_SAMPLES:])).plot()
plt.show()
The resulting plot:
Run the following code:
N_VIEWED_SAMPLES = 2
data_and_gen, _ = sample_data_and_gen(G, n_samples=N_VIEWED_SAMPLES)
pd.DataFrame(np.transpose(data_and_gen[N_VIEWED_SAMPLES:])).rolling(5).mean()[5:].plot()
plt.show()
The resulting plot:
The complete code:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from keras.models import Model
from keras.layers import Dense, Activation, Input, Reshape
from keras.layers import Conv1D, Flatten, Dropout
from keras.optimizers import SGD, Adam
from tqdm import tqdm_notebook as tqdm

#sec
def sample_data(n_samples=10000, x_vals=np.arange(0, 5, .1), max_offset=100, mul_range=[1, 2]):
    vectors = []
    for i in range(n_samples):
        offset = np.random.random() * max_offset
        mul = mul_range[0] + np.random.random() * (mul_range[1] - mul_range[0])
        vectors.append(np.sin(offset + x_vals * mul) / 2 + .5)
    return np.array(vectors)

ax = pd.DataFrame(np.transpose(sample_data(5))).plot()
plt.show()

#sec
def get_generative(G_in, dense_dim=200, out_dim=50, lr=1e-3):
    x = Dense(dense_dim)(G_in)
    x = Activation('tanh')(x)
    G_out = Dense(out_dim, activation='tanh')(x)
    G = Model(G_in, G_out)
    opt = SGD(lr=lr)
    G.compile(loss='binary_crossentropy', optimizer=opt)
    return G, G_out

G_in = Input(shape=[10])
G, G_out = get_generative(G_in)
G.summary()

#sec
def get_discriminative(D_in, lr=1e-3, drate=.25, n_channels=50, conv_sz=5, leak=.2):
    x = Reshape((-1, 1))(D_in)
    x = Conv1D(n_channels, conv_sz, activation='relu')(x)
    x = Dropout(drate)(x)
    x = Flatten()(x)
    x = Dense(n_channels)(x)
    D_out = Dense(2, activation='sigmoid')(x)
    D = Model(D_in, D_out)
    dopt = Adam(lr=lr)
    D.compile(loss='binary_crossentropy', optimizer=dopt)
    return D, D_out

D_in = Input(shape=[50])
D, D_out = get_discriminative(D_in)
D.summary()

#sec
def set_trainability(model, trainable=False):
    model.trainable = trainable
    for layer in model.layers:
        layer.trainable = trainable

def make_gan(GAN_in, G, D):
    set_trainability(D, False)
    x = G(GAN_in)
    GAN_out = D(x)
    GAN = Model(GAN_in, GAN_out)
    GAN.compile(loss='binary_crossentropy', optimizer=G.optimizer)
    return GAN, GAN_out

GAN_in = Input([10])
GAN, GAN_out = make_gan(GAN_in, G, D)
GAN.summary()

#sec
def sample_data_and_gen(G, noise_dim=10, n_samples=10000):
    XT = sample_data(n_samples=n_samples)
    XN_noise = np.random.uniform(0, 1, size=[n_samples, noise_dim])
    XN = G.predict(XN_noise)
    X = np.concatenate((XT, XN))
    y = np.zeros((2*n_samples, 2))
    y[:n_samples, 1] = 1
    y[n_samples:, 0] = 1
    return X, y

def pretrain(G, D, noise_dim=10, n_samples=10000, batch_size=32):
    X, y = sample_data_and_gen(G, n_samples=n_samples, noise_dim=noise_dim)
    set_trainability(D, True)
    D.fit(X, y, epochs=1, batch_size=batch_size)

pretrain(G, D)

#sec
def sample_noise(G, noise_dim=10, n_samples=10000):
    X = np.random.uniform(0, 1, size=[n_samples, noise_dim])
    y = np.zeros((n_samples, 2))
    y[:, 1] = 1
    return X, y

def train(GAN, G, D, epochs=200, n_samples=10000, noise_dim=10, batch_size=32, verbose=False, v_freq=50):
    d_loss = []
    g_loss = []
    e_range = range(epochs)
    if verbose:
        e_range = tqdm(e_range)
    for epoch in e_range:
        X, y = sample_data_and_gen(G, n_samples=n_samples, noise_dim=noise_dim)
        set_trainability(D, True)
        d_loss.append(D.train_on_batch(X, y))

        X, y = sample_noise(G, n_samples=n_samples, noise_dim=noise_dim)
        set_trainability(D, False)
        g_loss.append(GAN.train_on_batch(X, y))
        if verbose and (epoch + 1) % v_freq == 0:
            print("Epoch #{}: Generative Loss: {}, Discriminative Loss: {}".format(epoch + 1, g_loss[-1], d_loss[-1]))
    return d_loss, g_loss

d_loss, g_loss = train(GAN, G, D, verbose=True)

#sec
ax = pd.DataFrame(
    {
        'Generative Loss': g_loss,
        'Discriminative Loss': d_loss,
    }
).plot(title='Training loss', logy=True)
ax.set_xlabel("Epochs")
ax.set_ylabel("Loss")
plt.show()

#sec
N_VIEWED_SAMPLES = 2
data_and_gen, _ = sample_data_and_gen(G, n_samples=N_VIEWED_SAMPLES)
pd.DataFrame(np.transpose(data_and_gen[N_VIEWED_SAMPLES:])).plot()
plt.show()

#sec
N_VIEWED_SAMPLES = 2
data_and_gen, _ = sample_data_and_gen(G, n_samples=N_VIEWED_SAMPLES)
pd.DataFrame(np.transpose(data_and_gen[N_VIEWED_SAMPLES:])).rolling(5).mean()[5:].plot()
plt.show()
References:
https://blog.csdn.net/tanmx219/article/details/88074600
https://blog.csdn.net/xqf1528399071/article/details/53385593