In [ ]:
from DroneEnv import DroneAutomaticDrivingEnv
import math
import random
env=DroneAutomaticDrivingEnv()

# Roll out 10 episodes with uniformly random actions to sanity-check the environment.
for i in range(10):
    done = False
    s_prime = env.reset()
    j = 0

    while not done:
        a = random.randint(0, 3)   # random action index
        s_prime, r, done, info = env.step(a)
        if done:
            break
        env.render()
        j += 1
        # s_prime[16] appears to be a heading angle in radians; the other
        # printed indices are components of the observation vector.
        print(s_prime[16] * 180 / math.pi, s_prime[0], s_prime[2], s_prime[4], s_prime[6])
        if j > 1000:               # cap episode length
            done = True

    print(s_prime[18], env.x_goal_pre)

env.close()   # close once, after all episodes
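In [ ]:
# A minimal inspection of the observation vector, assuming only the gym-style
# reset()/step() API used above; the index meanings (e.g. s[16] as a heading
# angle in radians) are assumptions, not documented by DroneEnv.
from DroneEnv import DroneAutomaticDrivingEnv

env = DroneAutomaticDrivingEnv()
s = env.reset()
print('observation length:', len(s))
print(s)
env.close()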

     
In [ ]:
from DroneEnv import DroneAutomaticDrivingEnv
import math
import collections
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
learning_rate = 0.002
gamma         = 0.98
buffer_limit  = 50000
batch_size    = 32

class ScoreBuffer():
    def __init__(self):
        self.score_buffer=collections.deque(maxlen=10)
    def score_put(self, score):
        self.score_buffer.append(score)
        return self.score_buffer
    def score_size(self):
        return len(self.score_buffer)
    
class ReplayBuffer():
    def __init__(self):
        self.buffer = collections.deque(maxlen=buffer_limit)
    
    def put(self, transition):
        self.buffer.append(transition)
    
    def sample(self, n):
        mini_batch = random.sample(self.buffer, n)
        s_lst, a_lst, r_lst, s_prime_lst, done_mask_lst = [], [], [], [], []
        
        for transition in mini_batch:
            s, a, r, s_prime, done_mask = transition
            s_lst.append(s)
            a_lst.append([a])
            r_lst.append([r])
            s_prime_lst.append(s_prime)
            done_mask_lst.append([done_mask])

        return torch.tensor(s_lst, dtype=torch.float), torch.tensor(a_lst), \
               torch.tensor(r_lst), torch.tensor(s_prime_lst, dtype=torch.float), \
               torch.tensor(done_mask_lst)
    
    def size(self):
        return len(self.buffer)

class Qnet(nn.Module):
    def __init__(self):
        super(Qnet, self).__init__()
        self.fc1 = nn.Linear(12, 256)
        self.fc2 = nn.Linear(256, 256)
        self.fc3 = nn.Linear(256, 5)
        
    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x
      
    def sample_action(self, obs, epsilon):
        out = self.forward(obs)
        coin = random.random()
        if coin < epsilon:
            return random.randint(0,4)
        else : 
            return out.argmax().item()
            
def train(q, q_target, memory, optimizer):
    # 100 gradient steps per call, each on a fresh minibatch from the replay buffer.
    for i in range(100):
        s,a,r,s_prime,done_mask = memory.sample(batch_size)

        q_out = q(s)
        q_a = q_out.gather(1,a)                                  # Q(s,a) for the actions actually taken
        max_q_prime = q_target(s_prime).max(1)[0].unsqueeze(1)   # max_a' Q_target(s',a')
        target = r + gamma * max_q_prime * done_mask             # TD target; done_mask zeroes the bootstrap at terminal states
        loss = F.smooth_l1_loss(q_a, target)
        
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

def main():
    env = DroneAutomaticDrivingEnv()
    q = Qnet()
    q_target = Qnet()
    q_target.load_state_dict(q.state_dict())
    memory = ReplayBuffer()
    score_memory=ScoreBuffer()
    
    learning_done=False
    print_interval = 100
    score = 0.0  
    optimizer = optim.Adam(q.parameters(), lr=learning_rate)
    score_pre=0
    n_epi=0
    score_interval=0.0
    while not learning_done:
    #for n_epi in range(10000):
        epsilon = max(0.01, 1 - 0.5 * (n_epi / 5000))  # linear decay: 1.0 at episode 0, 0.5 at episode 5000, floored at 0.01
        s = env.reset()
        done = False
        
        input_nn=np.array([s[0],s[2],s[4],
                           s[6],s[8],s[9],
                           s[10],s[11],s[16],
                           s[17],s[22],s[23]])
        action_iter = 0
        for i in range(2000):
            # Action repeat: choose a new action every 4 steps, hold it in between.
            if action_iter == 0:
                a = q.sample_action(torch.from_numpy(input_nn).float(), epsilon)
                action_iter += 1
            elif action_iter == 3:
                action_iter = 0
            else:
                action_iter += 1
            s_prime, r, done, info = env.step(a)
            input_nn_prime=np.array([s_prime[0],s_prime[2],s_prime[4],
                                   s_prime[6],s_prime[8],s_prime[9],
                                   s_prime[10],s_prime[11],s_prime[16],
                                   s_prime[17],s_prime[22],s_prime[23]])
            # Terminate runaway episodes once the return leaves [-3000, 3000].
            if score < -3000 or score > 3000:
                done = True
            
            done_mask = 0.0 if done else 1.0
            memory.put((input_nn,a,r,input_nn_prime, done_mask))
            input_nn = input_nn_prime
           
            score += r
            score_interval +=r
            if done:
                break
            #if epsilon < 0.1 and score_interval/print_interval > 0 :
            if n_epi > 20000 :
                env.render()
             
        # Record the episode score; stop once the last 10 scores are all at least 100.
        score_memory.score_put(score)
        if score_memory.score_size() == 10:
            learning_done = all(sc >= 100 for sc in score_memory.score_buffer)
        if learning_done:
            env.close()
        
        if memory.size()>40000:
            train(q, q_target, memory, optimizer)
        if n_epi%print_interval==0 and n_epi!=0:
            q_target.load_state_dict(q.state_dict())
            print('score = ', score_interval/print_interval, 'episode = ', n_epi,
                  'epsilon = ', epsilon, 'buffer size = ', memory.size())
            score_interval = 0.0
        n_epi=n_epi+1
        score=0.0
 
if __name__ == '__main__':
    main()
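In [ ]:
# Self-contained sanity check (dummy numbers, no environment) of the TD-target
# computation used in train() above.
import torch
import torch.nn.functional as F

gamma = 0.98
r = torch.tensor([[1.0], [0.0]])              # rewards for a batch of 2 transitions
max_q_prime = torch.tensor([[5.0], [3.0]])    # max_a' Q_target(s', a')
done_mask = torch.tensor([[1.0], [0.0]])      # 0.0 at terminal states kills the bootstrap

target = r + gamma * max_q_prime * done_mask  # [[5.9000], [0.0000]]
q_a = torch.tensor([[4.0], [1.0]])            # current Q(s, a) estimates
print(target)
print(F.smooth_l1_loss(q_a, target))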
In [ ]:
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import Categorical
from DroneEnv import DroneAutomaticDrivingEnv
import numpy as np
#Hyperparameters
learning_rate = 0.00005
gamma         = 0.98
n_rollout     = 100

class ActorCritic(nn.Module):
    def __init__(self):
        super(ActorCritic, self).__init__()
        self.data = []
        
        self.fc1 = nn.Linear(24,512)
        self.fc2 = nn.Linear(512,256)
        self.fc_pi = nn.Linear(256,5)
        self.fc_v = nn.Linear(256,1)
        self.optimizer = optim.Adam(self.parameters(), lr=learning_rate)
        
    def pi(self, x, softmax_dim = 0):
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc_pi(x)
        prob = F.softmax(x, dim=softmax_dim)
        return prob
    
    def v(self, x):
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        v = self.fc_v(x)
        return v
    
    def put_data(self, transition):
        self.data.append(transition)
        
    def make_batch(self):
        s_lst, a_lst, r_lst, s_prime_lst, done_lst = [], [], [], [], []
        for transition in self.data:
            s,a,r,s_prime,done = transition
            s_lst.append(s)
            a_lst.append([a])
            r_lst.append([r])
            s_prime_lst.append(s_prime)
            done_mask = 0.0 if done else 1.0
            done_lst.append([done_mask])
        
        s_batch, a_batch, r_batch, s_prime_batch, done_batch = torch.tensor(s_lst, dtype=torch.float), torch.tensor(a_lst), \
                                                               torch.tensor(r_lst, dtype=torch.float), torch.tensor(s_prime_lst, dtype=torch.float), \
                                                               torch.tensor(done_lst, dtype=torch.float)
        self.data = []
        return s_batch, a_batch, r_batch, s_prime_batch, done_batch
  
    def train_net(self):
        s, a, r, s_prime, done = self.make_batch()
        td_target = r + gamma * self.v(s_prime) * done   # bootstrap unless terminal
        delta = td_target - self.v(s)                    # TD error, used as the advantage estimate
        
        pi = self.pi(s, softmax_dim=1)
        pi_a = pi.gather(1,a)
        # Policy-gradient term (advantage detached) plus critic regression loss.
        loss = -torch.log(pi_a) * delta.detach() + F.smooth_l1_loss(self.v(s), td_target.detach())

        self.optimizer.zero_grad()
        loss.mean().backward()
        self.optimizer.step()         
      
def main():  
    env = DroneAutomaticDrivingEnv()
    model = ActorCritic()    
    print_interval = 20
    score = 0.0

    for n_epi in range(10000):
        done = False
        s = env.reset()
        for i in range(50):
        #while not done:
            for t in range(n_rollout):
                prob = model.pi(torch.from_numpy(s).float())
                m = Categorical(prob)
                a = m.sample().item()
                s_prime, r, done, info = env.step(a)
                model.put_data((s,a,r,s_prime,done))
                
                s = s_prime
                score += r
                
                if done:
                    break                      
                if n_epi > 5000:
                    env.render()
            # Train on the collected rollout even when the episode ended early,
            # so leftover transitions in model.data don't leak into the next batch.
            model.train_net()
            if done:
                break
          
        if n_epi%print_interval==0 and n_epi!=0:
            print("# of episode :{}, avg score : {:.1f}".format(n_epi, score/print_interval))
            score = 0.0
    env.close()

if __name__ == '__main__':
    main()
# of episode :20, avg score : -7.0
# of episode :40, avg score : 3.6
# of episode :60, avg score : -36.1
# of episode :80, avg score : 70.8
# of episode :100, avg score : 112.0
# of episode :120, avg score : 72.0
# of episode :140, avg score : 17.9
# of episode :160, avg score : 43.5
# of episode :180, avg score : 74.4
# of episode :200, avg score : -15.6
# of episode :220, avg score : 36.4
# of episode :240, avg score : 58.9
# of episode :260, avg score : 115.0
# of episode :280, avg score : 92.6
# of episode :300, avg score : 136.1
# of episode :320, avg score : 179.7
# of episode :340, avg score : 143.0
# of episode :360, avg score : 141.3
# of episode :380, avg score : 37.8
# of episode :400, avg score : 105.5
# of episode :420, avg score : 182.1
# of episode :440, avg score : -74.0
# of episode :460, avg score : -267.9
# of episode :480, avg score : -158.2
# of episode :500, avg score : -352.8
# of episode :520, avg score : -162.4
# of episode :540, avg score : -172.6
# of episode :560, avg score : -259.9
# of episode :580, avg score : -139.1
# of episode :600, avg score : -229.8
# of episode :620, avg score : -298.0
# of episode :640, avg score : -487.7
# of episode :660, avg score : -537.5
# of episode :680, avg score : -3553.7
# of episode :700, avg score : -4470.0
# of episode :720, avg score : -4205.0
# of episode :740, avg score : -4735.0
# of episode :760, avg score : -4735.0
# of episode :780, avg score : -5000.0
# of episode :800, avg score : -5000.0
# of episode :820, avg score : -4735.0
# of episode :840, avg score : -4140.1
# of episode :860, avg score : -4735.0
# of episode :880, avg score : -4735.0
# of episode :900, avg score : -5000.0
# of episode :920, avg score : -4470.0
# of episode :940, avg score : -4470.0
# of episode :960, avg score : -4735.0
# of episode :980, avg score : -2999.5
# of episode :1000, avg score : -1371.3
# of episode :1020, avg score : 75.5
# of episode :1040, avg score : -59.2
# of episode :1060, avg score : 70.6
# of episode :1080, avg score : 31.7
# of episode :1100, avg score : 65.2
# of episode :1120, avg score : 33.8
# of episode :1140, avg score : 56.5
# of episode :1160, avg score : -3.8
# of episode :1180, avg score : 8.9
# of episode :1200, avg score : 74.3
# of episode :1220, avg score : 155.8
# of episode :1240, avg score : -13.2
# of episode :1260, avg score : 32.5
# of episode :1280, avg score : 92.8
# of episode :1300, avg score : 43.0
# of episode :1320, avg score : 43.6
# of episode :1340, avg score : 33.5
# of episode :1360, avg score : 45.4
# of episode :1380, avg score : 121.5
# of episode :1400, avg score : 10.5
# of episode :1420, avg score : 89.0
# of episode :1440, avg score : 42.8
# of episode :1460, avg score : 47.5
# of episode :1480, avg score : 107.8
# of episode :1500, avg score : 46.0
# of episode :1520, avg score : 28.0
# of episode :1540, avg score : 10.7
# of episode :1560, avg score : 33.0
# of episode :1580, avg score : 110.8
# of episode :1600, avg score : 26.5
# of episode :1620, avg score : 51.5
# of episode :1640, avg score : 64.8
# of episode :1660, avg score : -31.6
# of episode :1680, avg score : 130.9
# of episode :1700, avg score : 113.2
# of episode :1720, avg score : -14.7
# of episode :1740, avg score : 31.9
# of episode :1760, avg score : 94.3
# of episode :1780, avg score : 104.6
# of episode :1800, avg score : 48.1
# of episode :1820, avg score : 50.8
# of episode :1840, avg score : 48.5
# of episode :1860, avg score : 116.1
# of episode :1880, avg score : 38.5
# of episode :1900, avg score : 39.6
# of episode :1920, avg score : 64.0
# of episode :1940, avg score : 91.7
# of episode :1960, avg score : 45.7
# of episode :1980, avg score : 84.4
# of episode :2000, avg score : 109.3
# of episode :2020, avg score : -37.9
# of episode :2040, avg score : 65.6
# of episode :2060, avg score : -14.2
# of episode :2080, avg score : 86.4
# of episode :2100, avg score : 102.7
# of episode :2120, avg score : 57.5
# of episode :2140, avg score : 43.9
# of episode :2160, avg score : -71.5
# of episode :2180, avg score : 78.3
# of episode :2200, avg score : 34.8
# of episode :2220, avg score : 44.8
# of episode :2240, avg score : -11.6
# of episode :2260, avg score : -45.0
# of episode :2280, avg score : -12.5
# of episode :2300, avg score : -22.6
# of episode :2320, avg score : -7.4
# of episode :2340, avg score : 47.1
# of episode :2360, avg score : -76.0
# of episode :2380, avg score : 116.2
# of episode :2400, avg score : 132.8
# of episode :2420, avg score : 79.2
# of episode :2440, avg score : 13.9
# of episode :2460, avg score : -14.7
# of episode :2480, avg score : 10.7
# of episode :2500, avg score : 57.8
# of episode :2520, avg score : 61.5
# of episode :2540, avg score : 48.0
# of episode :2560, avg score : 46.1
# of episode :2580, avg score : 27.2
# of episode :2600, avg score : 37.9
# of episode :2620, avg score : 53.5
# of episode :2640, avg score : 45.2
# of episode :2660, avg score : 96.5
# of episode :2680, avg score : 123.3
# of episode :2700, avg score : 61.1
# of episode :2720, avg score : 101.0
# of episode :2740, avg score : 60.7
# of episode :2760, avg score : -10.9
# of episode :2780, avg score : 8.2
# of episode :2800, avg score : 63.9
# of episode :2820, avg score : 45.5
# of episode :2840, avg score : 89.7
# of episode :2860, avg score : 40.8
# of episode :2880, avg score : 63.6
# of episode :2900, avg score : 50.5
# of episode :2920, avg score : 37.6
# of episode :2940, avg score : 70.1
# of episode :2960, avg score : 60.1
# of episode :2980, avg score : -18.5
# of episode :3000, avg score : 105.3
# of episode :3020, avg score : 45.0
# of episode :3040, avg score : -17.4
# of episode :3060, avg score : 17.1
# of episode :3080, avg score : 6.5
# of episode :3100, avg score : 25.6
# of episode :3120, avg score : -56.0
# of episode :3140, avg score : 28.6
# of episode :3160, avg score : -15.5
# of episode :3180, avg score : 20.8
# of episode :3200, avg score : 67.0
# of episode :3220, avg score : 53.2
# of episode :3240, avg score : -36.6
# of episode :3260, avg score : 60.5
# of episode :3280, avg score : 71.9
# of episode :3300, avg score : 3.3
# of episode :3320, avg score : -70.5
# of episode :3340, avg score : -5.2
# of episode :3360, avg score : 38.5
# of episode :3380, avg score : 61.6
# of episode :3400, avg score : 68.7
# of episode :3420, avg score : -31.5
# of episode :3440, avg score : 25.2
# of episode :3460, avg score : 51.7
# of episode :3480, avg score : -8.3
# of episode :3500, avg score : 83.1
# of episode :3520, avg score : 33.5
# of episode :3540, avg score : 75.6
# of episode :3560, avg score : -70.3
# of episode :3580, avg score : -6.7
# of episode :3600, avg score : 40.4
# of episode :3620, avg score : 24.8
# of episode :3640, avg score : 115.9
# of episode :3660, avg score : 97.9
# of episode :3680, avg score : 42.7
# of episode :3700, avg score : 22.6
# of episode :3720, avg score : -2.5
# of episode :3740, avg score : 112.3
# of episode :3760, avg score : 33.1
# of episode :3780, avg score : 64.5
# of episode :3800, avg score : 85.9
# of episode :3820, avg score : -35.5
# of episode :3840, avg score : 24.4
# of episode :3860, avg score : 55.1
# of episode :3880, avg score : -19.0
# of episode :3900, avg score : 103.5
# of episode :3920, avg score : 17.9
# of episode :3940, avg score : -7.6
# of episode :3960, avg score : 133.7
# of episode :3980, avg score : 90.6
# of episode :4000, avg score : 82.7
# of episode :4020, avg score : 111.8
# of episode :4040, avg score : 6.9
# of episode :4060, avg score : 80.0
# of episode :4080, avg score : -15.5
# of episode :4100, avg score : 33.9
# of episode :4120, avg score : 38.2
# of episode :4140, avg score : 21.8
# of episode :4160, avg score : 94.5
# of episode :4180, avg score : 89.7
# of episode :4200, avg score : 31.3
# of episode :4220, avg score : 2.0
# of episode :4240, avg score : -9.5
# of episode :4260, avg score : 12.1
# of episode :4280, avg score : 29.7
# of episode :4300, avg score : -3.0
# of episode :4320, avg score : -25.1
# of episode :4340, avg score : 10.6
# of episode :4360, avg score : 10.9
# of episode :4380, avg score : 61.4
# of episode :4400, avg score : 15.8
# of episode :4420, avg score : 111.5
# of episode :4440, avg score : 2.2
# of episode :4460, avg score : -12.1
# of episode :4480, avg score : -5.5
# of episode :4500, avg score : 13.2
# of episode :4520, avg score : 1.1
# of episode :4540, avg score : 83.5
# of episode :4560, avg score : -17.9
# of episode :4580, avg score : 70.3
# of episode :4600, avg score : 68.7
# of episode :4620, avg score : 70.0
# of episode :4640, avg score : 35.8
# of episode :4660, avg score : 4.5
# of episode :4680, avg score : 97.7
# of episode :4700, avg score : 53.9
# of episode :4720, avg score : 49.5
# of episode :4740, avg score : 45.9
# of episode :4760, avg score : 25.4
# of episode :4780, avg score : 114.3
# of episode :4800, avg score : 86.7
# of episode :4820, avg score : 17.1
# of episode :4840, avg score : 32.0
# of episode :4860, avg score : 62.6
# of episode :4880, avg score : 45.0
# of episode :4900, avg score : 27.9
# of episode :4920, avg score : 18.5
# of episode :4940, avg score : 114.0
# of episode :4960, avg score : -34.5
# of episode :4980, avg score : 89.2
# of episode :5000, avg score : 18.0
# of episode :5020, avg score : 103.5
# of episode :5040, avg score : 36.3
# of episode :5060, avg score : 72.7
# of episode :5080, avg score : 65.5
# of episode :5100, avg score : 27.9
# of episode :5120, avg score : 117.2
# of episode :5140, avg score : 51.5
# of episode :5160, avg score : 55.5
# of episode :5180, avg score : 69.2
# of episode :5200, avg score : 45.2
# of episode :5220, avg score : -46.2
# of episode :5240, avg score : 55.6
# of episode :5260, avg score : 110.5
# of episode :5280, avg score : 45.2
# of episode :5300, avg score : 102.1
# of episode :5320, avg score : -12.3
# of episode :5340, avg score : 73.8
# of episode :5360, avg score : -8.6
# of episode :5380, avg score : 11.4
# of episode :5400, avg score : -8.0
# of episode :5420, avg score : 71.0
# of episode :5440, avg score : 72.5
# of episode :5460, avg score : 66.5
# of episode :5480, avg score : -9.2
# of episode :5500, avg score : -4.6
# of episode :5520, avg score : 16.9
# of episode :5540, avg score : 44.5
# of episode :5560, avg score : 63.5
# of episode :5580, avg score : 14.8
# of episode :5600, avg score : 17.6
# of episode :5620, avg score : 65.8
# of episode :5640, avg score : 56.7
# of episode :5660, avg score : 7.3
# of episode :5680, avg score : 52.8
# of episode :5700, avg score : -1.4
# of episode :5720, avg score : 33.0
# of episode :5740, avg score : 10.8
# of episode :5760, avg score : -22.6
# of episode :5780, avg score : 72.6
# of episode :5800, avg score : 25.4
# of episode :5820, avg score : 20.6
# of episode :5840, avg score : 70.2
# of episode :5860, avg score : 83.3
# of episode :5880, avg score : 58.3
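In [ ]:
# Small illustration (dummy numbers) of the advantage term in train_net():
# delta = td_target - V(s) is positive when the outcome beat the critic's
# estimate, so the -log(pi_a) * delta term raises the taken action's probability.
import torch

gamma = 0.98
r = torch.tensor([[1.0]])
v_s = torch.tensor([[2.0]])          # critic's estimate of V(s)
v_s_prime = torch.tensor([[3.0]])    # critic's estimate of V(s')
done_mask = torch.tensor([[1.0]])    # non-terminal

td_target = r + gamma * v_s_prime * done_mask
delta = td_target - v_s              # 1 + 0.98*3 - 2 = 1.94
pi_a = torch.tensor([[0.25]])        # policy's probability of the taken action
print(delta, -torch.log(pi_a) * delta)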
In [ ]:
# Roll out the trained actor-critic policy once, sampling actions from pi.
done = False
env = DroneAutomaticDrivingEnv()
s = env.reset()
while not done:
    prob = model.pi(torch.from_numpy(s).float())
    m = Categorical(prob)
    a = m.sample().item()
    s_prime, r, done, info = env.step(a)
    s = s_prime
    env.render()
env.close()
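In [ ]:
# Alternative evaluation: act greedily with argmax over the action probabilities
# instead of sampling, removing evaluation-time randomness. Assumes `model` from
# the training cell is still in scope.
done = False
env = DroneAutomaticDrivingEnv()
s = env.reset()
while not done:
    prob = model.pi(torch.from_numpy(s).float())
    a = prob.argmax().item()      # greedy action
    s, r, done, info = env.step(a)
    env.render()
env.close()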
In [ ]:
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
from torch.distributions import Categorical

class test(nn.Module):
    def __init__(self):
        super(test, self).__init__()
        self.fc1 = nn.Linear(4, 128)
        self.fc2 = nn.Linear(128, 2)
        
    def forward(self, x):
        x = F.relu(self.fc1(x))
        # No relu on the output layer: return raw logits, since relu-clipped
        # scores are not a valid basis for a probability distribution.
        x = self.fc2(x)
        return x

s = np.array([1, 1, 1, 1])
model = test()
logits = model(torch.from_numpy(s).float())
m = Categorical(logits=logits)   # logits= applies a stable softmax internally
a = m.sample().item()
print(a)
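In [ ]:
# Categorical accepts either probabilities or raw logits; with logits= it
# applies a numerically stable log-softmax internally, so an explicit softmax
# can be skipped when only sampling is needed.
import torch
from torch.distributions import Categorical

logits = torch.tensor([1.0, 2.0, 0.5])
m_logits = Categorical(logits=logits)
m_probs = Categorical(probs=torch.softmax(logits, dim=0))
print(m_logits.probs)             # identical distribution either way
print(m_probs.probs)
print(m_logits.sample().item())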
In [ ]:
# Numerical behavior of log on raw scores: log(0) is -inf and log(-1) is nan,
# which is why the policy loss takes log of softmax probabilities instead.
a = torch.FloatTensor(np.array([0, -1]))
print(a)
print(torch.log(a))          # [-inf, nan]
pi = F.softmax(a, dim=0)     # softmax maps any real vector to positive probabilities
print(pi)
pi = torch.log(pi)
print(pi)
pi_a = pi.gather(0, torch.LongTensor([0]))
In [ ]:
pi = F.softmax(model(torch.from_numpy(s).float()),dim=0)
print(pi)
pi_a = pi.gather(0,torch.LongTensor([0]))
print(pi_a)
In [ ]:
torch.log(torch.FloatTensor(np.array([0.00000001])))
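In [ ]:
# log(softmax(x)) computed in two steps can underflow to log(0) = -inf, as the
# tiny-probability example above suggests; F.log_softmax fuses the two steps
# and stays finite even for extreme inputs.
import torch
import torch.nn.functional as F

x = torch.tensor([0.0, 200.0])
print(torch.log(F.softmax(x, dim=0)))   # first entry underflows, giving -inf
print(F.log_softmax(x, dim=0))          # [-200., 0.] stays finite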
In [ ]:
from DroneEnv import DroneAutomaticDrivingEnv
import numpy as np
env = DroneAutomaticDrivingEnv()
s=env.reset()
print(env.state)
# Note: the last two indices (18, 19) differ from the 22/23 used in training above.
input_nn = np.array([env.state[0], env.state[2], env.state[4],
                     env.state[6], env.state[8], env.state[9],
                     env.state[10], env.state[11], env.state[16],
                     env.state[17], env.state[18], env.state[19]])
In [ ]:
from DroneEnv import DroneAutomaticDrivingEnv
import math
import collections
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np

# Greedy rollout of the trained Q-network (epsilon = 0 disables exploration).
# Assumes `q` from the DQN cell above is still in scope.
env = DroneAutomaticDrivingEnv()
epsilon = 0
s = env.reset()
done = False
input_nn = np.array([s[0], s[2], s[4],
                     s[6], s[8], s[9],
                     s[10], s[11], s[16],
                     s[17], s[22], s[23]])
action_iter = 0
while not done:
    # Same action-repeat scheme as in training: new action every 4 steps.
    if action_iter == 0:
        a = q.sample_action(torch.from_numpy(input_nn).float(), epsilon)
        action_iter += 1
    elif action_iter == 3:
        action_iter = 0
    else:
        action_iter += 1
    s_prime, r, done, info = env.step(a)
    input_nn = np.array([s_prime[0], s_prime[2], s_prime[4],
                         s_prime[6], s_prime[8], s_prime[9],
                         s_prime[10], s_prime[11], s_prime[16],
                         s_prime[17], s_prime[22], s_prime[23]])
    if done:
        break
    env.render()
env.close()
In [ ]:
# Inspect the randomly initialized parameters of a fresh Q-network.
q = Qnet()
q.state_dict()
In [ ]:
print(q.state_dict())
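In [ ]:
# A minimal sketch for persisting the Q-network's weights; the filename is
# arbitrary (hypothetical). load_state_dict restores them into a fresh Qnet.
torch.save(q.state_dict(), 'qnet_drone.pt')

q_restored = Qnet()
q_restored.load_state_dict(torch.load('qnet_drone.pt'))
q_restored.eval()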