A simple DQN example implemented with mxnet (gluon)

References

莫凡 (Morvan) series of course videos

Introduction to reinforcement learning: the Q-Learning algorithm


For the basics of reinforcement learning you can refer to the second link, which is quite an entertaining read. For the DQN material, watch the relevant videos behind the first link. The course provides example code in TensorFlow and PyTorch; this post mainly rewrites the network in Gluon.

The Q-learning algorithm flow


The DQN algorithm flow


Understanding DQN:

What reinforcement learning needs to learn is the Q-table, i.e. the decision table. When the state space is very large it becomes hard, or even impossible, to build this table explicitly. But the table is really just a mapping (s, a) -> R, and such a mapping can be built with a neural network instead; that is the idea behind DQN.
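
For intuition, here is a minimal sketch of the tabular update that such a Q-table would perform (purely illustrative; the Q dict, alpha, and the helper name are assumptions and are not used in the code below). DQN replaces the table lookup with a network that predicts Q(s, ·) for all actions at once.

def q_learning_update(Q, s, a, r, s_, actions, alpha=0.1, gamma=0.9):
    # Q is a dict mapping (state, action) -> estimated return
    best_next = max(Q.get((s_, a_), 0.0) for a_ in actions)   # best value reachable from the next state
    target = r + gamma * best_next                            # Bellman target
    old = Q.get((s, a), 0.0)
    Q[(s, a)] = old + alpha * (target - old)                  # move the table entry toward the target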


Now let's look at the code.

import mxnet as mx
import mxnet.ndarray as nd
import mxnet.gluon as gluon
import numpy as np
import mxnet.gluon.nn as nn
import gym


BATCH_SIZE = 64                                 # batch size used when training the network
LR = 0.01                                       # learning rate for the weight updates
EPSILON = 0.9                                   # probability of picking the greedy action each step, somewhat similar in spirit to evolutionary algorithms
GAMMA = 0.5                                     # discount factor: how much the next state's return contributes to q_target
TARGET_REPLACE_ITER = 100                       # how often to snapshot the network parameters (which can be thought of as the previous mapping)
MEMORY_CAPACITY = 1000                          # capacity of the replay memory of past transitions
env = gym.make('CartPole-v0')                   # build the environment with OpenAI Gym
env = env.unwrapped
N_ACTIONS = env.action_space.n                  # number of candidate actions
N_STATES = env.observation_space.shape[0]       # length of the state vector
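
As a quick check of what the environment provides (a small sketch; for CartPole-v0 the action space has 2 discrete actions and the observation is a 4-dimensional vector):

print(N_ACTIONS, N_STATES)                                 # expected: 2 4 for CartPole-v0
s = env.reset()                                            # initial state, a length-4 vector
s_, r, done, info = env.step(env.action_space.sample())   # take one random action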


# Define the network; this example just uses a few arbitrarily chosen layers

class Net(nn.HybridBlock):
    def __init__(self, **kwargs):
        super(Net, self).__init__(**kwargs)
        with self.name_scope():
            self.fc1 = nn.Dense(16, activation='relu')
            self.fc2 = nn.Dense(32, activation='relu')
            self.fc3 = nn.Dense(16, activation='relu')
            self.out = nn.Dense(N_ACTIONS)

    def hybrid_forward(self, F, x):
        x = self.fc1(x)
        x = self.fc2(x)
        x = self.fc3(x)
        actions_value = self.out(x)
        return actions_value
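
A quick shape sanity check for this network (a sketch using a dummy state; the variable names here are illustrative):

net = Net()
net.initialize()                                 # parameters are shaped lazily on the first forward pass
dummy_state = nd.random_uniform(shape=(1, N_STATES))
print(net(dummy_state).shape)                    # expected: (1, N_ACTIONS)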


# Define how to copy the network weights. DQN uses off-policy updates, so it needs the mapping from an earlier point in time; this is done by keeping a saved copy of the network weights. The network holding these saved weights is only used for forward passes, like a table lookup, and its parameters are not updated until a certain condition is met, at which point the current network parameters are stored again.

def copy_params(src, dst):
    dst.initialize(force_reinit=True, ctx=mx.cpu())
    layer_names = ['dense0_weight', 'dense0_bias', 'dense1_weight', 'dense1_bias',
                   'dense2_weight', 'dense2_bias', 'dense3_weight', 'dense3_bias']
    for i in range(len(layer_names)):
        dst.get(layer_names[i]).set_data(src.get(layer_names[i]).data())
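
An alternative sketch that avoids hard-coding the layer names, assuming src and dst come from two structurally identical Net() instances so their parameters line up in the same order (copy_params_generic is a hypothetical helper, not used later):

def copy_params_generic(src, dst):
    # src and dst are ParameterDicts from two identical networks;
    # rely on matching parameter order instead of explicit names
    for src_param, dst_param in zip(src.values(), dst.values()):
        dst_param.set_data(src_param.data())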



# Define the DQN class: it holds the networks, the action-selection policy, the transition storage, and so on

class DQN(object):
    def __init__(self):
        self.eval_net, self.target_net = Net(), Net()
        self.eval_net.initialize()
        self.target_net.initialize()
        x = nd.random_uniform(shape=(1, N_STATES))
        _ = self.eval_net(x)
        _ = self.target_net(x)    # force a forward pass because of mxnet's deferred initialization
        self.learn_step_counter = 0
        self.memory_counter = 0
        self.memory = np.zeros(shape=(MEMORY_CAPACITY, N_STATES*2+2))
        # each row stores the current state, the chosen action, the reward, and the next state
        self.trainer = gluon.Trainer(self.eval_net.collect_params(), 'sgd',
                                     {'learning_rate': LR, 'wd': 1e-4})
        self.loss_func = gluon.loss.L2Loss()
        self.cost_his = []
    def choose_action(self, x):
        if np.random.uniform() < EPSILON:
            # with probability EPSILON pick the greedy (most promising) action
            x = nd.array([x])
            actions_value = self.eval_net(x)
            action = int(nd.argmax(actions_value, axis=1).asscalar())
        else:
            action = np.random.randint(0, N_ACTIONS)
        return action
    def store_transition(self, s, a, r, s_):
        # store one transition in the replay memory
        transition = np.hstack((s, [a, r], s_))
        index = self.memory_counter % MEMORY_CAPACITY
        # the index wraps around so the storage is reused cyclically
        self.memory[index, :] = transition
        self.memory_counter += 1
        
    def learn(self):
        if self.learn_step_counter % TARGET_REPLACE_ITER == 0:
            # every TARGET_REPLACE_ITER learning steps, copy the current
            # eval-network parameters into the target network
            copy_params(self.eval_net.collect_params(), self.target_net.collect_params())

        self.learn_step_counter += 1

        sample_index = np.random.choice(MEMORY_CAPACITY, BATCH_SIZE)
        # randomly sample a batch of stored transitions
        b_memory = self.memory[sample_index, :]

        b_s = nd.array(b_memory[:, :N_STATES])
        b_a = nd.array(b_memory[:, N_STATES:N_STATES+1])
        b_r = nd.array(b_memory[:, N_STATES+1:N_STATES+2])
        b_s_ = nd.array(b_memory[:, -N_STATES:])
        with mx.autograd.record():
            q_eval_all = self.eval_net(b_s)                            # predicted Q-values, batch x N_ACTIONS
            q_eval = nd.pick(q_eval_all, b_a, axis=1, keepdims=True)   # Q(s, a) for the action actually taken, batch x 1
            with mx.autograd.pause():
                q_next = self.target_net(b_s_)                         # values from the frozen target network, batch x N_ACTIONS
            q_target = b_r + GAMMA * nd.max(q_next, axis=1, keepdims=True)   # Bellman target, batch x 1
            loss = self.loss_func(q_eval, q_target)

        self.cost_his.append(nd.mean(loss).asscalar())
        loss.backward()
        self.trainer.step(BATCH_SIZE)
        
    def plot_cost(self):
        import matplotlib.pyplot as plt
        plt.plot(np.arange(len(self.cost_his)), self.cost_his)
        plt.ylabel('Cost')
        plt.xlabel('training steps')
        plt.show()


# Training
dqn = DQN()
for i_episode in range(500):
    s = env.reset()
    while True:
        env.render()
        a = dqn.choose_action(s)
        s_, r, done, info = env.step(a)   # next state, reward, whether the episode is over

        # reshape the reward using the cart position and pole angle,
        # so that states closer to failure are penalized more
        x, x_dot, theta, theta_dot = s_
        r1 = (env.x_threshold - abs(x)) / env.x_threshold - 0.8
        r2 = (env.theta_threshold_radians - abs(theta)) / env.theta_threshold_radians - 0.5
        r = r1 + r2

        dqn.store_transition(s, a, r, s_)
        if dqn.memory_counter > MEMORY_CAPACITY:
            dqn.learn()

        if done:
            break

        s = s_
dqn.plot_cost()
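
After training, one way to inspect the learned policy is to roll it out greedily for a few episodes and count how long the pole stays up (a sketch; this greedy rollout is an addition, not part of the original script):

for episode in range(5):
    s = env.reset()
    steps = 0
    while True:
        env.render()
        a = int(nd.argmax(dqn.eval_net(nd.array([s])), axis=1).asscalar())   # always take the greedy action
        s, r, done, _ = env.step(a)
        steps += 1
        if done:
            print('episode %d lasted %d steps' % (episode, steps))
            break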


Loss curve:


The training loss does not seem to converge yet; I am still looking into the cause.


P.S. This is my first time writing a blog post with Open Live Writer, and the experience is terrible!!!! I need proper support for formulas, code, and images... still searching for a better tool.
