ppo-Pyramids/run_logs/timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.39097166061401367,
"min": 0.39097166061401367,
"max": 1.4326444864273071,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11841.75,
"min": 11695.240234375,
"max": 43460.703125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989982.0,
"min": 29952.0,
"max": 989982.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989982.0,
"min": 29952.0,
"max": 989982.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6403189897537231,
"min": -0.1667199432849884,
"max": 0.6403189897537231,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 181.21026611328125,
"min": -39.51262664794922,
"max": 181.21026611328125,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.011348869651556015,
"min": -0.01969035528600216,
"max": 0.3381563425064087,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.2117300033569336,
"min": -5.021040439605713,
"max": 80.14305114746094,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06880426084583936,
"min": 0.06602099334261202,
"max": 0.07204867108148315,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0320639126875903,
"min": 0.49542927702245887,
"max": 1.0592615787286823,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.016290232432691162,
"min": 0.00044715984124048047,
"max": 0.017264449444064186,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.24435348649036742,
"min": 0.006260237777366727,
"max": 0.24435348649036742,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.570977476373333e-06,
"min": 7.570977476373333e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001135646621456,
"min": 0.0001135646621456,
"max": 0.003509074130308699,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10252362666666667,
"min": 0.10252362666666667,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5378544,
"min": 1.3691136000000002,
"max": 2.5696913,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.000262110304,
"min": 0.000262110304,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00393165456,
"min": 0.00393165456,
"max": 0.11699216087000003,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.00832540076225996,
"min": 0.008207755163311958,
"max": 0.3344776928424835,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.12488101422786713,
"min": 0.11490856856107712,
"max": 2.341343879699707,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 299.02912621359224,
"min": 295.15533980582524,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30800.0,
"min": 15984.0,
"max": 33028.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6621262001470454,
"min": -1.0000000521540642,
"max": 1.6659922175181723,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 171.19899861514568,
"min": -32.000001668930054,
"max": 171.59719840437174,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6621262001470454,
"min": -1.0000000521540642,
"max": 1.6659922175181723,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 171.19899861514568,
"min": -32.000001668930054,
"max": 171.59719840437174,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.02573409063223912,
"min": 0.02573409063223912,
"max": 6.5260189566761255,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.6506113351206295,
"min": 2.5975699304253794,
"max": 104.41630330681801,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1691714236",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1691716567"
},
"total": 2331.2847599140005,
"count": 1,
"self": 1.041851969000163,
"children": {
"run_training.setup": {
"total": 0.03962273999991339,
"count": 1,
"self": 0.03962273999991339
},
"TrainerController.start_learning": {
"total": 2330.2032852050006,
"count": 1,
"self": 1.4623620030674829,
"children": {
"TrainerController._reset_env": {
"total": 5.479988999999932,
"count": 1,
"self": 5.479988999999932
},
"TrainerController.advance": {
"total": 2323.113378192934,
"count": 64020,
"self": 1.5106463309025457,
"children": {
"env_step": {
"total": 1633.6768066770383,
"count": 64020,
"self": 1520.8383699559447,
"children": {
"SubprocessEnvManager._take_step": {
"total": 111.97414155901038,
"count": 64020,
"self": 4.735934557027576,
"children": {
"TorchPolicy.evaluate": {
"total": 107.2382070019828,
"count": 62545,
"self": 107.2382070019828
}
}
},
"workers": {
"total": 0.8642951620831809,
"count": 64020,
"self": 0.0,
"children": {
"worker_root": {
"total": 2324.7955204649843,
"count": 64020,
"is_parallel": true,
"self": 921.1916012870058,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002536320000217529,
"count": 1,
"is_parallel": true,
"self": 0.0006544589996337891,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00188186100058374,
"count": 8,
"is_parallel": true,
"self": 0.00188186100058374
}
}
},
"UnityEnvironment.step": {
"total": 0.047729962999937925,
"count": 1,
"is_parallel": true,
"self": 0.000592090999816719,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005110189999868453,
"count": 1,
"is_parallel": true,
"self": 0.0005110189999868453
},
"communicator.exchange": {
"total": 0.044695340000089345,
"count": 1,
"is_parallel": true,
"self": 0.044695340000089345
},
"steps_from_proto": {
"total": 0.0019315130000450154,
"count": 1,
"is_parallel": true,
"self": 0.00037341100005505723,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015581019999899581,
"count": 8,
"is_parallel": true,
"self": 0.0015581019999899581
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1403.6039191779785,
"count": 64019,
"is_parallel": true,
"self": 34.38853562699978,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.35862423399385,
"count": 64019,
"is_parallel": true,
"self": 23.35862423399385
},
"communicator.exchange": {
"total": 1239.293995746001,
"count": 64019,
"is_parallel": true,
"self": 1239.293995746001
},
"steps_from_proto": {
"total": 106.56276357098386,
"count": 64019,
"is_parallel": true,
"self": 20.869480681042205,
"children": {
"_process_rank_one_or_two_observation": {
"total": 85.69328288994166,
"count": 512152,
"is_parallel": true,
"self": 85.69328288994166
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 687.925925184993,
"count": 64020,
"self": 2.659672714064527,
"children": {
"process_trajectory": {
"total": 113.09464460893582,
"count": 64020,
"self": 112.79854820893502,
"children": {
"RLTrainer._checkpoint": {
"total": 0.29609640000080617,
"count": 2,
"self": 0.29609640000080617
}
}
},
"_update_policy": {
"total": 572.1716078619927,
"count": 453,
"self": 374.46103158794153,
"children": {
"TorchPPOOptimizer.update": {
"total": 197.71057627405116,
"count": 22854,
"self": 197.71057627405116
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0759995348053053e-06,
"count": 1,
"self": 1.0759995348053053e-06
},
"TrainerController._save_models": {
"total": 0.14755493299981026,
"count": 1,
"self": 0.001997058999222645,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14555787400058762,
"count": 1,
"self": 0.14555787400058762
}
}
}
}
}
}
}
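
For reference, a minimal Python sketch of one way to read this profile; the relative file path and the walk() helper below are illustrative assumptions, not part of ML-Agents. It recursively follows the nested "children" entries above and prints each timer's wall-clock total and call count:

import json

def walk(node, name="root", depth=0):
    # Each timer node carries a wall-clock "total" (seconds) and a call "count".
    print(f"{'  ' * depth}{name}: {node.get('total', 0.0):.2f}s over {node.get('count', 0)} call(s)")
    # Recurse into nested timers; keys like "gauges" and "metadata" are simply ignored.
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

with open("run_logs/timers.json") as f:
    walk(json.load(f))

Run against the data above, this shows that communicator.exchange (the round trips to the Unity executable, about 1239 s) dominates env_step, while _update_policy (about 572 s) accounts for most of trainer_advance.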