{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.42247989773750305,
"min": 0.2507127523422241,
"max": 1.3387961387634277,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 12525.68359375,
"min": 7509.3486328125,
"max": 40613.71875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989882.0,
"min": 29952.0,
"max": 989882.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989882.0,
"min": 29952.0,
"max": 989882.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.08870507776737213,
"min": -0.09990935772657394,
"max": 0.1302768737077713,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -21.3779239654541,
"min": -23.978246688842773,
"max": 30.87561798095703,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.007405513897538185,
"min": 0.007405513897538185,
"max": 0.26140132546424866,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 1.7847288846969604,
"min": 1.7847288846969604,
"max": 61.95211410522461,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0636985083229247,
"min": 0.06222402359897639,
"max": 0.08619558502930869,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.8917791165209458,
"min": 0.512270215733496,
"max": 1.0180602577633644,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0011697235171670994,
"min": 1.6828586770101862e-07,
"max": 0.22390341937266836,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.01637612924033939,
"min": 2.3560021478142606e-06,
"max": 1.5673239356086786,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 0.00024233366900523563,
"min": 0.00024233366900523563,
"max": 0.009838354287330745,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0033926713660732987,
"min": 0.0033926713660732987,
"max": 0.1001261503987385,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10242333571428573,
"min": 0.10242333571428573,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4339267000000002,
"min": 1.327104,
"max": 2.4012615000000004,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002520912378571428,
"min": 0.0002520912378571428,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035292773299999995,
"min": 0.0035292773299999995,
"max": 0.10016602384999998,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.006938199978321791,
"min": 0.006938199978321791,
"max": 0.3823823928833008,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.0971347987651825,
"min": 0.0930439904332161,
"max": 2.6766767501831055,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 942.0322580645161,
"min": 877.8,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29203.0,
"min": 15984.0,
"max": 33136.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.8136323096771394,
"min": -1.0000000521540642,
"max": -0.398328041434288,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -25.22260159999132,
"min": -32.000001668930054,
"max": -9.9582010358572,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.8136323096771394,
"min": -1.0000000521540642,
"max": -0.398328041434288,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -25.22260159999132,
"min": -32.000001668930054,
"max": -9.9582010358572,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.0704167797875632,
"min": 0.0704167797875632,
"max": 7.5239295568317175,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.182920173414459,
"min": 2.182920173414459,
"max": 120.38287290930748,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679764548",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679766642"
},
"total": 2093.1895810589995,
"count": 1,
"self": 0.42674449300011474,
"children": {
"run_training.setup": {
"total": 0.10148298900003283,
"count": 1,
"self": 0.10148298900003283
},
"TrainerController.start_learning": {
"total": 2092.6613535769993,
"count": 1,
"self": 1.3110864339460022,
"children": {
"TrainerController._reset_env": {
"total": 5.9665194620001785,
"count": 1,
"self": 5.9665194620001785
},
"TrainerController.advance": {
"total": 2085.2885122760536,
"count": 63002,
"self": 1.4301724810857195,
"children": {
"env_step": {
"total": 1462.7712967929701,
"count": 63002,
"self": 1354.7328569310566,
"children": {
"SubprocessEnvManager._take_step": {
"total": 107.19787674897725,
"count": 63002,
"self": 4.6925786661495295,
"children": {
"TorchPolicy.evaluate": {
"total": 102.50529808282772,
"count": 62564,
"self": 102.50529808282772
}
}
},
"workers": {
"total": 0.8405631129362519,
"count": 63002,
"self": 0.0,
"children": {
"worker_root": {
"total": 2087.9617663450654,
"count": 63002,
"is_parallel": true,
"self": 846.8736037550943,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018512480000936193,
"count": 1,
"is_parallel": true,
"self": 0.0005588340009126114,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001292413999181008,
"count": 8,
"is_parallel": true,
"self": 0.001292413999181008
}
}
},
"UnityEnvironment.step": {
"total": 0.04606646699994599,
"count": 1,
"is_parallel": true,
"self": 0.0005352190000849077,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00048139899990928825,
"count": 1,
"is_parallel": true,
"self": 0.00048139899990928825
},
"communicator.exchange": {
"total": 0.04313178199936374,
"count": 1,
"is_parallel": true,
"self": 0.04313178199936374
},
"steps_from_proto": {
"total": 0.001918067000588053,
"count": 1,
"is_parallel": true,
"self": 0.00044711300142807886,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001470953999159974,
"count": 8,
"is_parallel": true,
"self": 0.001470953999159974
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1241.088162589971,
"count": 63001,
"is_parallel": true,
"self": 30.465440786872932,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.175739983982567,
"count": 63001,
"is_parallel": true,
"self": 22.175739983982567
},
"communicator.exchange": {
"total": 1097.9550138070117,
"count": 63001,
"is_parallel": true,
"self": 1097.9550138070117
},
"steps_from_proto": {
"total": 90.49196801210383,
"count": 63001,
"is_parallel": true,
"self": 19.16001759790288,
"children": {
"_process_rank_one_or_two_observation": {
"total": 71.33195041420095,
"count": 504008,
"is_parallel": true,
"self": 71.33195041420095
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 621.0870430019977,
"count": 63002,
"self": 2.3550194468389236,
"children": {
"process_trajectory": {
"total": 111.27673798415708,
"count": 63002,
"self": 111.08167639015664,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19506159400043543,
"count": 2,
"self": 0.19506159400043543
}
}
},
"_update_policy": {
"total": 507.4552855710017,
"count": 420,
"self": 322.4564692430504,
"children": {
"TorchPPOOptimizer.update": {
"total": 184.99881632795132,
"count": 22806,
"self": 184.99881632795132
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.980003596865572e-07,
"count": 1,
"self": 9.980003596865572e-07
},
"TrainerController._save_models": {
"total": 0.09523440699922503,
"count": 1,
"self": 0.001514858999144053,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09371954800008098,
"count": 1,
"self": 0.09371954800008098
}
}
}
}
}
}
}