97 lines
3.8 KiB
Python
97 lines
3.8 KiB
Python
"""
|
|
Exp 7: mountain_track, v5 reward, throttle_min=0.5, lr=0.000725, 90k steps
|
|
- steps_per_switch=6000 → 15 checkpoints saved across the run
|
|
- best_model.zip saved whenever a new best segment score is achieved
|
|
- Single track: TRAINING_TRACKS overridden to mountain_track only
|
|
"""
|
|
import sys, os, time
|
|
sys.path.insert(0, '/home/paulh/projects/donkeycar-rl-autoresearch/agent')
|
|
|
|
import multitrack_runner as mr
|
|
from multitrack_runner import (log, _send_exit_scene, StuckTerminationWrapper,
|
|
train_multitrack)
|
|
from donkeycar_sb3_runner import ThrottleClampWrapper
|
|
from reward_wrapper import SpeedRewardWrapper
|
|
from stable_baselines3 import PPO
|
|
from stable_baselines3.common.vec_env import DummyVecEnv, VecTransposeImage
|
|
import gymnasium as gym
|
|
|
|
# Floor applied to the throttle action by ThrottleClampWrapper (presumably to
# keep the car from stalling on hills — confirm against the wrapper).
THROTTLE_MIN = 0.5

# PPO learning rate for this experiment (swept value; see module docstring).
LR = 0.000725

# Total environment steps for the whole training run.
TOTAL_STEPS = 90000

STEPS_PER_SEG = 6000   # 15 checkpoints across the run, NOT 1

# All checkpoints and best_model.zip land here.
SAVE_DIR = '/home/paulh/projects/donkeycar-rl-autoresearch/agent/models/exp7-mountain-checkpointed'

# Idempotent — safe when re-running the experiment.
os.makedirs(SAVE_DIR, exist_ok=True)
|
|
|
|
def make_env(env_id):
    """Build one fully-wrapped Donkey environment for the given gym env id.

    Wrapper order, innermost first: throttle clamp (floor at THROTTLE_MIN),
    stuck-episode termination (ends the episode after 80 steps with less
    than 0.5 displacement), then the reward-shaping wrapper.
    """
    base = gym.make(env_id)
    clamped = ThrottleClampWrapper(base, throttle_min=THROTTLE_MIN)
    guarded = StuckTerminationWrapper(clamped, stuck_steps=80, min_displacement=0.5)
    return SpeedRewardWrapper(guarded)
|
|
|
|
# Banner: record the full experiment configuration at the top of the log so
# runs are self-describing.
log('='*60)
log('Exp 7: mountain_track ONLY, v5 reward, throttle_min=0.5')
log(f'  lr={LR}, total_steps={TOTAL_STEPS:,}, steps_per_seg={STEPS_PER_SEG:,}')
log(f'  Checkpoints: {TOTAL_STEPS // STEPS_PER_SEG} saves across the run')
log(f'  Save dir: {SAVE_DIR}')
log('='*60)

# Switch sim to mountain_track
# NOTE(review): opening a throwaway env and sending exit-scene appears to be
# the handshake that makes the (single-scene) simulator unload its current
# track; the sleeps give it time to settle — confirm against multitrack_runner.
log('Switching to mountain_track...')
tmp = gym.make('donkey-mountain-track-v0'); time.sleep(2)
_send_exit_scene(tmp, verbose=False); tmp.close(); time.sleep(5)

# Vectorized (single-env) pipeline; VecTransposeImage reorders image obs to
# channel-first as expected by the CNN policy.
env = VecTransposeImage(DummyVecEnv([lambda: make_env('donkey-mountain-track-v0')]))

# Fresh PPO model on CPU with the experiment's learning rate.
model = PPO('CnnPolicy', env, learning_rate=LR, verbose=1, device='cpu')
|
|
|
|
# Override to single track — segments still run every 6k steps for checkpointing.
# train_multitrack iterates mr.TRAINING_TRACKS, so narrowing the list to one
# entry keeps the per-segment checkpoint/best-model machinery while training
# on mountain_track only.
_orig = mr.TRAINING_TRACKS
mr.TRAINING_TRACKS = [('mountain_track', 'donkey-mountain-track-v0')]

try:
    env, segment_rewards = train_multitrack(
        model, env,
        total_timesteps=TOTAL_STEPS,
        steps_per_switch=STEPS_PER_SEG,
        save_dir=SAVE_DIR,
    )
finally:
    # Restore the module-level track list even if training raises — otherwise
    # a failed run would leave the single-track override in place for any
    # later user of multitrack_runner in this process.
    mr.TRAINING_TRACKS = _orig
|
|
|
|
# Report every file the run left in the checkpoint directory, with its size.
log(f'\nCheckpoints saved in {SAVE_DIR}:')
for fname in sorted(os.listdir(SAVE_DIR)):
    n_mb = os.path.getsize(os.path.join(SAVE_DIR, fname)) // (1024 * 1024)
    log(f'  {fname} ({n_mb}MB)')

# Tear down the training env and give the simulator a moment before the
# evaluation phase re-attaches to it.
env.close()
time.sleep(3)

# Eval best_model on all tracks
best_path = os.path.join(SAVE_DIR, 'best_model.zip')
|
|
|
|
def eval_track(current_id, track_id, name, n=3):
    """Evaluate best_model.zip for n episodes on track_id, logging each one.

    current_id identifies the scene the simulator is currently showing; a
    throwaway env on that id is opened so the scene can be exited before the
    new track is loaded.  Returns track_id so calls can be chained.
    """
    log(f'\n--- EVAL: {name} ---')

    # Leave whatever scene the sim is on, then let it settle.
    handle = gym.make(current_id)
    time.sleep(2)
    _send_exit_scene(handle, verbose=False)
    handle.close()
    time.sleep(5)

    vec = VecTransposeImage(DummyVecEnv([lambda: make_env(track_id)]))
    policy = PPO.load(best_path, env=vec, device='cpu')

    for ep in range(1, n + 1):
        obs = vec.reset()
        total = 0.0
        steps = 0
        finished = False
        # Cap each episode at 2000 steps; reaching the cap counts as a full lap.
        while not finished and steps < 2000:
            action, _ = policy.predict(obs, deterministic=True)
            result = vec.step(action)
            if len(result) == 5:
                # 5-tuple: separate terminated / truncated arrays.
                obs, r, term, trunc, info = result
                finished = bool(term[0] or trunc[0])
            else:
                # Classic 4-tuple: single done array.
                obs, r, d, info = result
                finished = bool(d[0])
            total += float(r[0])
            steps += 1
        status = '✅ FULL' if steps >= 2000 else f'❌ crash@{steps}'
        log(f'  ep{ep}: {total:.1f} reward / {steps} steps — {status}')
        time.sleep(1)

    vec.close()
    time.sleep(3)
    return track_id
|
|
|
|
# Chain the evaluations: eval_track returns the track it just ran, so each
# subsequent call knows which scene to exit before loading its own.
current = 'donkey-mountain-track-v0'
eval_plan = [
    ('donkey-mountain-track-v0', 'mountain_track (training)'),
    ('donkey-generated-track-v0', 'generated_track (zero-shot)'),
    ('donkey-minimonaco-track-v0', 'mini_monaco (zero-shot)'),
    ('donkey-generated-roads-v0', 'generated_road (zero-shot)'),
]
for env_id, label in eval_plan:
    current = eval_track(current, env_id, label)

log('\n=== Exp 7 COMPLETE ===')
|