Deep Q Learning #2811

Closed
johan606303 opened this issue Jun 29, 2020 · 3 comments

@johan606303

Hi.

I wrote some code for obstacle avoidance, but there is an error whose origin I can't figure out.
At a random iteration (for example 400, 1208, ...), VS2019 interrupts the training and returns the following error:
[screenshot: algo_error]

Here is the full code. As I'm new to RL, it's not a complex piece of code. Wherever the values inside time.sleep(x) are divided by 300, it's because I've sped up the simulation time in UE4.

Thank you for your help.

```python
import numpy as np
import airsim
import time
import math
import random
from collections import deque

import tensorflow as tf
import keras
from airsim.utils import to_eularian_angles, to_quaternion
from keras.layers import Conv2D, Dense, Activation, MaxPool2D, Dropout, Input, Flatten
from keras.activations import softmax, elu, relu
from keras.optimizers import Adam, RMSprop
from keras.models import Sequential, Model, load_model
import keras.backend as K
#tf.compat.v1.disable_eager_execution()

client=airsim.MultirotorClient()
z=-5
memory_size=10000000
#pos_0=client.getMultirotorState().kinematics_estimated.position

#state_space=[84, 84]
#action_size=3

def OurModel(state_size,action_space):
    # Convolutional network mapping a depth image to Q-values for each action
    X_input=Input(state_size,name='Input')
    X=Conv2D(filters=32,kernel_size=(8,8),strides=(4,4),padding='valid',activation='relu')(X_input)
    X=MaxPool2D(pool_size=(2,2))(X)
    X=Conv2D(filters=64,kernel_size=(4,4),strides=(2,2),padding='valid',activation='relu')(X)
    X=MaxPool2D(pool_size=(2,2))(X)
    X=Conv2D(filters=64,kernel_size=(1,1),strides=(1,1),padding='valid',activation='relu')(X)

    X=Flatten()(X)

    X=Dense(525,activation='relu')(X)
    X=Dropout(0.3)(X)
    X=Dense(300,activation='relu')(X)
    X_output=Dense(action_space,activation='softmax')(X)

    model=Model(inputs=X_input, outputs=X_output)
    model.compile(loss="mse", optimizer=RMSprop(lr=0.0005, rho=0.95, epsilon=0.01), metrics=["accuracy"])
    model.summary()

    return model

class MemoryClass():
    def __init__(self,memory_size):
        self.memory_size=memory_size
        self.buffer=deque(maxlen=memory_size)
        self.batch_size=64
        #self.start_training=20

    def add(self,experience):
        self.buffer.append(experience)

    def sample(self):
        buffer_size=len(self.buffer)
        idx=np.random.choice(np.arange(buffer_size),self.batch_size,False)
        return [self.buffer[k] for k in idx]

    def replay(self):
        # Sample a minibatch and split it into its components
        batch=self.sample()
        next_states_mb=np.array([each[0] for each in batch],ndmin=3)
        actions_mb=np.array([each[1] for each in batch])
        states_mb=np.array([each[2] for each in batch],ndmin=3)
        rewards_mb=np.array([each[3] for each in batch])
        dones_mb=np.array([each[4] for each in batch])

        return next_states_mb, actions_mb, states_mb, rewards_mb, dones_mb

class Agent():
    def __init__(self):
        self.state_size=(84, 84, 1)
        self.action_space=3
        #self.DQNNetwork=DQNN(state_size,action_space)
        self.model1=OurModel(self.state_size,self.action_space)
        self.memory_size=10000000
        self.memory=MemoryClass(memory_size)
        self.gamma=0.75
        self.epsilon_min=0.001
        self.epsilon=1.0
        self.epsilon_decay=0.995
        self.episodes=120
        self.max_step=120
        self.step=0
        self.count=0
        self.pos0=client.getMultirotorState().kinematics_estimated.position
        self.z=-5
        self.goal_pos=[50,50]
        self.initial_position=[0,0]
        self.initial_distance=np.sqrt((self.initial_position[0]-self.goal_pos[0])**2+(self.initial_position[1]-self.goal_pos[1])**2)
        self.batch_size=30

    def generate_state(self):
        # Request a depth image and resize it to the network input shape
        responses = client.simGetImages([airsim.ImageRequest("0", airsim.ImageType.DepthPerspective, True, False)])
        img1d = np.array(responses[0].image_data_float, dtype=np.float)
        #img1d = 255/np.maximum(np.ones(img1d.size), img1d)
        img2d = np.reshape(img1d, (responses[0].height, responses[0].width))

        from PIL import Image
        image = Image.fromarray(img2d)
        im_final = np.array(image.resize((84, 84)).convert('L'))
        im_final = np.reshape(im_final, [*self.state_size])
        return im_final

    def load(self, name):
        self.model1 = load_model(name)

    def save(self, name):
        self.model1.save(name)

    def get_yaw(self):
        quaternions=client.getMultirotorState().kinematics_estimated.orientation
        a,b,yaw_rad=to_eularian_angles(quaternions)
        yaw_deg=math.degrees(yaw_rad)
        return yaw_deg,yaw_rad

    def rotate_left(self):
        client.moveByRollPitchYawrateZAsync(0,0,0.2,self.z,3)
        n=int(3*5)
        D=[]
        done=False
        for k in range(n):
            collision=client.simGetCollisionInfo().has_collided
            done=collision
            D.append(collision)
            time.sleep(3/(n*300))
        if True in D:
            done=True
        time.sleep(3/300)
        time.sleep(5/300)
        new_state=self.generate_state()
        return done,new_state

    def rotate_right(self):
        client.moveByRollPitchYawrateZAsync(0,0,-0.2,self.z,3)
        n=int(3*5)
        D=[]
        done=False
        for k in range(n):
            collision=client.simGetCollisionInfo().has_collided
            done=collision
            D.append(collision)
            time.sleep(3/(n*300))
        if True in D:
            done=True
        time.sleep(3/300)
        time.sleep(5/300)
        new_state=self.generate_state()
        return done,new_state

    def move_forward(self):
        yaw_deg,yaw_rad=self.get_yaw()
        #need rad
        vx=math.cos(yaw_rad)*0.25
        vy=math.sin(yaw_rad)*0.25
        client.moveByVelocityAsync(vx,vy,0,10,airsim.DrivetrainType.ForwardOnly,airsim.YawMode(False))
        done=False
        n=int(10*5)
        D=[]
        for k in range(n):
            collision=client.simGetCollisionInfo().has_collided
            D.append(collision)
            time.sleep(3.4/(n*300))
        if True in D:
            done=True
        new_state=self.generate_state()
        time.sleep(15/300)
        return done,new_state

    def step_function(self,action):
        # Returns action, new_state, done
        done=False
        # Move forward by pitch
        if action==0:
            done,new_state=self.move_forward()
        # Rotate to the right by 20 degrees
        elif action==1:
            done,new_state=self.rotate_right()
        # Rotate to the left by 30 degrees
        elif action==2:
            done,new_state=self.rotate_left()
        self.count+=1
        return action,new_state,done

    def compute_reward(self,done):
        reward=0.0
        pos_now=client.getMultirotorState().kinematics_estimated.position
        dist=np.sqrt((pos_now.x_val-self.goal_pos[0])**2+(pos_now.y_val-self.goal_pos[1])**2)
        print('dist: ',dist)

        if done==False and self.step<self.max_step:
            reward+=(self.initial_distance-dist)*6
            if 10<self.step<40 and dist>self.initial_distance*3/4:
                reward=-2-(self.step-10)
            elif 50<self.step<80 and dist>self.initial_distance*2/4:
                reward=-36-(self.step-50)
            elif 80<self.step<self.max_step and dist>self.initial_distance*1/4:
                reward=-80-(self.step-80)
            elif dist<3:
                reward+=650.0
        elif done==True and dist>3:
            reward-=180.0
        print('reward: ',reward)
        return reward

    def choose_action(self,state):
        # Epsilon-greedy action selection
        r=np.random.rand()
        print('r: ',r)
        print('epsilon: ',self.epsilon)
        print()
        if r>self.epsilon and self.count>64:
            #print('predicted action')
            state=np.reshape(state,[1,*self.state_size])
            #action=np.argmax(self.DQNNetwork.OurModel.predict(state))
            action=np.argmax(self.model1.predict(state))
        else:
            action=random.randrange(self.action_space)
        return action

    def reset(self):
        client.reset()

    def initial_pos(self):
        client.enableApiControl(True)
        v=0.6
        #z0=client.getMultirotorState().kinematics_estimated.position.z_val
        #t=np.abs(z0-self.z)/v
        client.moveToZAsync(self.z,v).join()
        #time.sleep(t+1)

    def epsilon_policy(self):
        # Decay epsilon towards epsilon_min
        if self.epsilon>self.epsilon_min:
            self.epsilon*=self.epsilon_decay

    def train(self):
        for episode in range(self.episodes):
            self.initial_pos()
            self.step=0
            state=self.generate_state()
            done=False
            total_reward,episode_rewards=[],[]
            while self.step<self.max_step:
                self.step+=1
                print('count:', self.count)
                choice=self.choose_action(state)
                self.epsilon_policy()
                action,next_state,done=self.step_function(choice)
                reward=self.compute_reward(done)
                episode_rewards.append(reward)
                if done==True:
                    total_reward.append(sum(episode_rewards))
                    self.memory.add([next_state,action,state,reward,done])
                    self.step=self.max_step
                    self.reset()
                    print("episode: {}, epsilon: {:.5}, total reward: {}".format(episode, self.epsilon, total_reward[-1]))
                    self.save("airsim-dqn.h5")
                else:
                    # store the transition before overwriting state with next_state
                    self.memory.add([next_state,action,state,reward,done])
                    state=next_state
                if len(self.memory.buffer)>64:
                    next_states_mb, actions_mb, states_mb, rewards_mb, dones_mb=self.memory.replay()
                    target = self.model1.predict(states_mb)
                    target_next = self.model1.predict(next_states_mb)
                    for k in range(len(dones_mb)):
                        if dones_mb[k]==True:
                            target[k][actions_mb[k]] = rewards_mb[k]
                        else:
                            target[k][actions_mb[k]] = rewards_mb[k] + self.gamma * (np.amax(target_next[k]))
                    self.model1.fit(x=states_mb,y=target,batch_size=self.batch_size)

agent=Agent()
agent.train()

```

@rajat2004
Contributor

rajat2004 commented Jun 30, 2020

UE can sometimes fail to render the image, and in that case the image will be empty. You'll have to handle this case, maybe by calling the API again if the image is empty.
Note that right now AirSim fills the array with a single 0: when that code was written, rpclib had a bug with handling empty structs (rpclib/rpclib#152), so you'll have to check whether the length is 1 and handle it accordingly.
Also see #1840 for another way to check whether the image is empty.
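
For example, a minimal retry guard around the image request could look like the sketch below; the helper name, retry count, and sleep interval are made up for illustration, not something AirSim prescribes:

```python
import time
import numpy as np
import airsim

def get_depth_image(client, retries=5):
    # Re-request the image if UE returned nothing (an empty array or the
    # single-0 placeholder mentioned above). Retry count and pause are arbitrary.
    for _ in range(retries):
        responses = client.simGetImages([airsim.ImageRequest(
            "0", airsim.ImageType.DepthPerspective, True, False)])
        if responses and len(responses[0].image_data_float) > 1:
            img1d = np.array(responses[0].image_data_float, dtype=np.float32)
            return img1d.reshape(responses[0].height, responses[0].width)
        time.sleep(0.05)
    raise RuntimeError("simGetImages kept returning an empty image")
```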

Since the RPC problem has been fixed, I think the workaround in AirSim can be removed now.
Edit: Opened #2812, which removes it.

A discussion could be had about whether an empty image should be handled by AirSim internally or by the user.

@johan606303
Author

@rajat2004 Another question, if possible: is there a way in AirSim to reset the position of only one drone? For example, if I try to use DQL or RL with several multirotors?

@rajat2004
Contributor

Hmm, the current reset() API doesn't take a vehicle name as an argument, but it should be possible to use it on a per-vehicle basis - https://github.com/microsoft/AirSim/blob/master/AirLib/src/api/RpcLibServerBase.cpp#L183. But I'm not sure how it'll affect the physics.
As a shortcut for now, you could try using client.simSetVehiclePose(airsim.Pose(airsim.Vector3r(0, 0, 0), airsim.to_quaternion(0, 0, 0)), True, vehicle_name='')
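
A rough sketch of that shortcut, assuming a multi-vehicle settings.json in which one of the drones is named "Drone1" and spawns at the origin (both are placeholders, adjust to your setup):

```python
import airsim

client = airsim.MultirotorClient()
client.confirmConnection()

# Teleport only the named vehicle back to its assumed spawn pose;
# the other drones in the simulation are left untouched.
start_pose = airsim.Pose(airsim.Vector3r(0, 0, 0), airsim.to_quaternion(0, 0, 0))
client.simSetVehiclePose(start_pose, True, vehicle_name="Drone1")
```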
