Hello,
It seems there is a problem with the state_feature_extractor in the agent.py file.
Could you please look at the following errors, which occur when executing the test_demo script?
Best regards,
Ronen.
+ echo Logging output to outputs/dummy/test_log.txt.2021-07-20_10-26-24
Logging output to outputs/dummy/test_log.txt.2021-07-20_10-26-24
+ python -m core.train_test_offline --expert --pretrained output/demo_model --test --render
pybullet build time: Jul 14 2021 10:11:29
Output will be saved to `output/demo_model`
Using config:
{'DATA_ROOT_DIR': 'data/scenes',
'EPOCHS': 200,
'EXPERIMENT_OBJ_INDEX_DIR': 'experiments/object_index',
'IMG_SIZE': [112, 112],
'LOG': True,
'MODEL_SPEC_DIR': 'experiments/model_spec',
'OBJECT_DATA_DIR': 'data/objects',
'OFFLINE_BATCH_SIZE': 100,
'OFFLINE_RL_MEMORY_SIZE': 100000,
'ONPOLICY_MEMORY_SIZE': -1,
'OUTPUT_DIR': 'output',
'OUTPUT_MISC_DIR': 'output_misc',
'RL_DATA_ROOT_DIR': 'data/scenes',
'RL_IMG_SIZE': [112, 112],
'RL_MAX_STEP': 20,
'RL_MEMORY_SIZE': 2000000,
'RL_MODEL_SPEC': 'output/demo_model/rl_pointnet_model_spec.yaml',
'RL_SAVE_DATA_NAME': 'data_50k.npz',
'RL_SAVE_DATA_ROOT_DIR': 'data',
'RL_TEST_SCENE': 'data/gaddpg_scenes',
'RL_TRAIN': {'DAGGER_MAX_STEP': 18,
'DAGGER_MIN_STEP': 5,
'DAGGER_RATIO': 0.5,
'DART_MAX_STEP': 18,
'DART_MIN_STEP': 5,
'DART_RATIO': 0.5,
'ENV_FAR': 0.5,
'ENV_NEAR': 0.2,
'ENV_RESET_TRIALS': 7,
'EXPERT_INIT_MAX_STEP': 15,
'EXPERT_INIT_MIN_STEP': 3,
'RL': True,
'SAVE_EPISODE_INTERVAL': 50,
'accumulate_points': True,
'action_noise': 0.01,
'batch_size': 125,
'bc_reward_flag': False,
'buffer_full_size': -1,
'buffer_start_idx': 0,
'change_dynamics': False,
'channel_num': 5,
'clip_grad': 0.5,
'concat_option': 'point_wise',
'critic_aux': True,
'critic_extra_latent': -1,
'critic_goal': False,
'dagger': True,
'dart': True,
'ddpg_coefficients': [0.5, 0.001, 1.0003, 1.0, 0.2],
'domain_randomization': False,
'env_name': 'PandaYCBEnv',
'env_num_objs': 1,
'expert_initial_state': False,
'explore_cap': 1.0,
'explore_ratio': 1.0,
'explore_ratio_list': [0.7, 0.7, 0.7, 0.7, 0.7, 0.7, 0.7],
'feature_input_dim': 512,
'fill_data_step': 10,
'fix_timestep_test': True,
'gamma': 0.95,
'goal_reward_flag': False,
'head_lr': 0.0003,
'hidden_size': 256,
'index_file': 'experiments/object_index/extra_shape.json',
'index_split': 'train',
'init_distance_high': 0.45,
'init_distance_low': 0.15,
'load_buffer': False,
'load_obj_num': 40,
'load_scene_joint': False,
'load_test_scene_new': False,
'log': True,
'lr': 0.0003,
'lr_gamma': 0.5,
'max_epoch': 150000,
'max_num_pts': 20000,
'mix_milestones': [4000,
8000,
16000,
25000,
35000,
45000,
65000,
85000,
100000,
120000],
'mix_policy_ratio_list': [0.1, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2],
'mix_value_ratio_list': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
'new_scene': True,
'noise_ratio_list': [3.0, 2.5, 2.0, 1.5, 1.2, 1.2, 1, 0.8, 0.5],
'noise_type': 'uniform',
'num_remotes': 8,
'off_policy': True,
'online_buffer_ratio': 0.7,
'onpolicy': True,
'overwrite_feat_milestone': [],
'policy_aux': True,
'policy_extra_latent': -1,
'policy_goal': False,
'policy_milestones': [20000, 40000, 60000, 80000],
'policy_update_gap': 2,
'pt_accumulate_ratio': 0.95,
'refill_buffer': True,
'reinit_factor': 3,
'reinit_lr': 0.0001,
'reinit_optim': False,
'sa_channel_concat': True,
'save_epoch': [3000,
10000,
20000,
40000,
80000,
100000,
140000,
180000,
200000],
'self_supervision': False,
'shared_feature': False,
'shared_objects_across_worker': False,
'target_update_interval': 3000,
'tau': 0.0001,
'train_feature': True,
'train_goal_feature': False,
'train_value_feature': True,
'uniform_num_pts': 1024,
'updates_per_step': 20,
'use_action_limit': True,
'use_expert_plan': False,
'use_image': False,
'use_point_state': True,
'use_time': True,
'value_lr': 0.0003,
'value_lr_gamma': 0.5,
'value_milestones': [20000, 40000, 60000, 80000],
'value_model': True,
'visdom': True},
'RNG_SEED': 3,
'ROBOT_DATA_DIR': 'data/robots',
'ROOT_DIR': '/home/nir1tv/projects/GA-DDPG/experiments/../',
'SCRIPT_FOLDER': 'experiments/cfgs',
'env_config': {'accumulate_points': True,
'action_space': 'task6d',
'change_dynamics': False,
'data_type': 'RGBDM',
'domain_randomization': False,
'expert_step': 20,
'height': 112,
'img_resize': [112, 112],
'initial_far': 0.5,
'initial_near': 0.2,
'numObjects': 1,
'omg_config': {'allow_collision_point': 0,
'base_obstacle_weight': 1.0,
'clearance': 0.03,
'dynamic_timestep': False,
'extra_smooth_steps': 5,
'goal_idx': -1,
'ik_clearance': 0.07,
'ik_parallel': False,
'ik_seed_num': 13,
'increment_iks': True,
'ol_alg': 'Proj',
'optim_steps': 1,
'pre_terminate': True,
'root_dir': '/home/nir1tv/projects/GA-DDPG/experiments/../',
'scene_file': '',
'silent': True,
'smoothness_base_weight': 3,
'standoff_dist': 0.08,
'target_epsilon': 0.06,
'target_hand_filter_angle': 90,
'target_obj_collision': 1,
'terminate_smooth_loss': 3,
'timesteps': 20,
'traj_delta': 0.05,
'traj_init': 'grasp',
'traj_interpolate': 'linear',
'traj_max_step': 26,
'traj_min_step': 15,
'use_expert_plan': False,
'vis': False},
'pt_accumulate_ratio': 0.95,
'random_target': True,
'regularize_pc_point_count': True,
'uniform_num_pts': 1024,
'use_hand_finger_point': True,
'width': 112},
'omg_config': {'allow_collision_point': 0,
'base_obstacle_weight': 1.0,
'clearance': 0.03,
'dynamic_timestep': False,
'extra_smooth_steps': 5,
'goal_idx': -1,
'ik_clearance': 0.07,
'ik_parallel': False,
'ik_seed_num': 13,
'increment_iks': True,
'ol_alg': 'Proj',
'optim_steps': 1,
'pre_terminate': True,
'root_dir': '/home/nir1tv/projects/GA-DDPG/experiments/../',
'scene_file': '',
'silent': True,
'smoothness_base_weight': 3,
'standoff_dist': 0.08,
'target_epsilon': 0.06,
'target_hand_filter_angle': 90,
'target_obj_collision': 1,
'terminate_smooth_loss': 3,
'timesteps': 20,
'traj_delta': 0.05,
'traj_init': 'grasp',
'traj_interpolate': 'linear',
'traj_max_step': 26,
'traj_min_step': 15,
'use_expert_plan': False,
'vis': False},
'pretrained_time': '',
'script_name': 'td3_critic_aux_policy_aux.yaml'}
Let's use 2 GPUs!
schedule: [8000, 16000, 30000, 50000, 70000, 90000]
schedule: [8000, 16000, 30000, 50000, 70000, 90000]
Output will be saved to `output/demo_model`
video output: YCB_td3_critic_aux_policy_aux.yaml stat output: rollout_success.script_td3_critic_aux_policy_aux.yaml.txt
load pretrained policy!!!!
load pretrained critic!!!!
load feat optim
load pretrained feature!!!! from: output/demo_model/DDPG_state_feat_PandaYCBEnv_latest step :249921
output_time: demo_model logdir: output/demo_model/PandaYCBEnv_DDPG
startThreads creating 1 threads.
starting thread 0
started thread 0
argc=2
argv[0] = --unused
argv[1] = --start_demo_name=Physics Server
ExampleBrowserThreadFunc started
X11 functions dynamically loaded using dlopen/dlsym OK!
X11 functions dynamically loaded using dlopen/dlsym OK!
Creating context
Created GL 3.3 context
Direct GLX rendering context obtained
Making context current
GL_VENDOR=NVIDIA Corporation
GL_RENDERER=NVIDIA GeForce RTX 2080 Ti/PCIe/SSE2
GL_VERSION=3.3.0 NVIDIA 470.42.01
GL_SHADING_LANGUAGE_VERSION=3.30 NVIDIA via Cg compiler
pthread_getconcurrency()=0
Version = 3.3.0 NVIDIA 470.42.01
Vendor = NVIDIA Corporation
Renderer = NVIDIA GeForce RTX 2080 Ti/PCIe/SSE2
b3Printf: Selected demo: Physics Server
startThreads creating 1 threads.
starting thread 0
started thread 0
MotionThreadFunc thread started
numActiveThreads = 0
stopping threads
destroy semaphore
semaphore destroyed
Thread with taskId 0 exiting
Thread TERMINATED
destroy main semaphore
main semaphore destroyed
finished
numActiveThreads = 0
btShutDownExampleBrowser stopping threads
Thread with taskId 0 exiting
Thread TERMINATED
destroy semaphore
semaphore destroyed
destroy main semaphore
main semaphore destroyed
startThreads creating 1 threads.
starting thread 0
started thread 0
argc=2
argv[0] = --unused
argv[1] = --start_demo_name=Physics Server
ExampleBrowserThreadFunc started
X11 functions dynamically loaded using dlopen/dlsym OK!
X11 functions dynamically loaded using dlopen/dlsym OK!
Creating context
Created GL 3.3 context
Direct GLX rendering context obtained
Making context current
GL_VENDOR=NVIDIA Corporation
GL_RENDERER=NVIDIA GeForce RTX 2080 Ti/PCIe/SSE2
GL_VERSION=3.3.0 NVIDIA 470.42.01
GL_SHADING_LANGUAGE_VERSION=3.30 NVIDIA via Cg compiler
pthread_getconcurrency()=0
Version = 3.3.0 NVIDIA 470.42.01
Vendor = NVIDIA Corporation
Renderer = NVIDIA GeForce RTX 2080 Ti/PCIe/SSE2
b3Printf: Selected demo: Physics Server
startThreads creating 1 threads.
starting thread 0
started thread 0
MotionThreadFunc thread started
ven = NVIDIA Corporation
ven = NVIDIA Corporation
/home/nir1tv/projects/GA-DDPG/env/models
>>>> target name: 061_foam_brick
==== loaded scene: scene_0 target: 006_mustard_bottle idx: 5 init joint
Traceback (most recent call last):
File "/home/nir1tv/miniconda3/envs/gaddpg/lib/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/home/nir1tv/miniconda3/envs/gaddpg/lib/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/home/nir1tv/projects/GA-DDPG/core/train_test_offline.py", line 387, in <module>
test(run_iter=run_iter)
File "/home/nir1tv/projects/GA-DDPG/core/train_test_offline.py", line 230, in test
action, _, _, aux_pred = agent.select_action(state, vis=False, remain_timestep=remain_timestep )
File "/home/nir1tv/miniconda3/envs/gaddpg/lib/python3.6/site-packages/torch/autograd/grad_mode.py", line 49, in decorate_no_grad
return func(*args, **kwargs)
File "/home/nir1tv/projects/GA-DDPG/core/agent.py", line 113, in select_action
train=False,
File "/home/nir1tv/projects/GA-DDPG/core/ddpg.py", line 54, in extract_feature
train=train)
File "/home/nir1tv/projects/GA-DDPG/core/agent.py", line 77, in unpack_batch
train=train,
File "/home/nir1tv/miniconda3/envs/gaddpg/lib/python3.6/site-packages/torch/nn/modules/module.py", line 532, in __call__
result = self.forward(*input, **kwargs)
File "/home/nir1tv/miniconda3/envs/gaddpg/lib/python3.6/site-packages/torch/nn/parallel/data_parallel.py", line 152, in forward
outputs = self.parallel_apply(replicas, inputs, kwargs)
File "/home/nir1tv/miniconda3/envs/gaddpg/lib/python3.6/site-packages/torch/nn/parallel/data_parallel.py", line 162, in parallel_apply
return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])
File "/home/nir1tv/miniconda3/envs/gaddpg/lib/python3.6/site-packages/torch/nn/parallel/parallel_apply.py", line 85, in parallel_apply
output.reraise()
File "/home/nir1tv/miniconda3/envs/gaddpg/lib/python3.6/site-packages/torch/_utils.py", line 394, in reraise
raise self.exc_type(msg)
TypeError: Caught TypeError in replica 1 on device 1.
Original Traceback (most recent call last):
File "/home/nir1tv/miniconda3/envs/gaddpg/lib/python3.6/site-packages/torch/nn/parallel/parallel_apply.py", line 60, in _worker
output = module(*input, **kwargs)
File "/home/nir1tv/miniconda3/envs/gaddpg/lib/python3.6/site-packages/torch/nn/modules/module.py", line 532, in __call__
result = self.forward(*input, **kwargs)
TypeError: forward() missing 1 required positional argument: 'pc'
numActiveThreads = 0
stopping threads
Thread with taskId 0 exiting
Thread TERMINATED
destroy semaphore
semaphore destroyed
destroy main semaphore
main semaphore destroyed
finished
numActiveThreads = 0
btShutDownExampleBrowser stopping threads
destroy semaphore
semaphore destroyed
Thread with taskId 0 exiting
Thread TERMINATED
destroy main semaphore
main semaphore destroyed
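P.S. A guess at the cause, in case it helps: the log prints "Let's use 2 GPUs!", so the feature extractor is wrapped in nn.DataParallel, and the traceback dies inside parallel_apply on replica 1. As far as I can tell from torch/nn/parallel/scatter_gather.py, when the keyword arguments scatter into fewer per-device chunks than the positional inputs (for example with a single test-time sample), the missing replicas are padded with empty dicts, so replica 1 ends up calling forward() without pc. Below is a minimal sketch of the situation and the workarounds I would try; the TinyExtractor class is mine, not the repo's code.

```python
# Minimal sketch of the suspected failure mode (my guess, not the GA-DDPG code):
# with two visible GPUs the extractor is wrapped in nn.DataParallel, and a
# test-time call can leave replica 1 without the `pc` argument.
import torch
import torch.nn as nn


class TinyExtractor(nn.Module):
    """Stand-in for the state feature extractor; `pc` mirrors the traceback."""

    def forward(self, pc, train=False):
        return pc.mean(dim=-1)


if torch.cuda.device_count() >= 2:
    model = nn.DataParallel(TinyExtractor().cuda())  # replicates on every visible GPU
    pc = torch.randn(1, 5, 1024).cuda()              # a single test-time observation

    # Workaround 1: bypass the DataParallel wrapper at test time and
    # call the underlying module directly.
    features = model.module(pc, train=False)
    print(features.shape)

    # Workaround 2: expose only one GPU before launching the test_demo
    # script, so DataParallel is never used at all, e.g.
    #   CUDA_VISIBLE_DEVICES=0 <test_demo script>
```

If restricting the run to a single GPU with CUDA_VISIBLE_DEVICES=0 makes the test pass, that would confirm the multi-GPU scatter is the problem rather than the checkpoint itself.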