Skip to content

Instantly share code, notes, and snippets.

@tristanls
Last active December 17, 2024 14:44
Show Gist options
  • Save tristanls/ef2cec968941f0225143983ef33e6923 to your computer and use it in GitHub Desktop.
tbp.monty sequence diagrams

BasePolicy

sequenceDiagram
    participant E as entrypoint
    participant AS as ActionSpace
    participant MS as MotorSystem
    participant BP as BasePolicy
    participant RAF as read_action_file

    E ->>+ BP : __init__(action_space, file_name, file_names_per_episode, ...)
    BP ->>+ AS : sample
    AS -->>- BP : action
    BP ->> BP : get_random_action(action)
    activate BP
        loop
            alt if rand() < switch_frequency:
                BP ->>+ AS : sample
                AS -->>- BP : action
            end
            break if action != agent_id+".set_agent_pose" and action != agent_id+".set_sensor_rotation"
                BP -->> BP : action
            end
        end
    deactivate BP
    BP ->> BP : create_action_name_to_sample_fn
    BP ->> BP : get_next_amount(action)
    alt if file_names_per_episode is not None
        BP ->> BP : is_predefined = True
    end
    alt if file_name is not None
        BP ->>+ RAF : __call__(file_name)
        RAF -->>- BP : action_list, amount_list
        BP ->> BP : is_predefined = True
    end
    BP -->>- E : 

    E ->>+ BP : pre_episode
    BP -->>- E : 

    E ->>+ MS : __call__
    alt is_predefined
        MS ->>+ BP : predefined_call
        BP -->>- MS : action, amount
    else
        MS ->>+ BP : dynamic_call
        BP ->> BP : get_random_action(action)
        activate BP
            loop
                alt if rand() < switch_frequency:
                    BP ->>+ AS : sample
                    AS -->>- BP : action
                end
                break if action != agent_id+".set_agent_pose" and action != agent_id+".set_sensor_rotation"
                    BP -->> BP : action
                end
            end
        deactivate BP
        BP ->> BP : get_next_amount(action)
        BP -->>- MS : action, amount
    end
    MS ->>+ BP : post_action(action, amount)
    BP -->>- MS : 
    MS -->>- E : action, amount

    E ->>+ BP : post_episode
    alt file_names_per_episode is not None
        alt episode_count in file_names_per_episode
            BP ->>+ RAF : __call__(file_name)
            RAF -->>- BP : action_list, amount_list
        end
    end
    BP -->>- E : 
Loading

InformedEnvironmentDataLoader + InformedPolicy

sequenceDiagram
    participant E as entrypoint
    participant AS as ActionSpace
    participant MS as MotorSystem
    participant BP as BasePolicy
    participant RAF as read_action_file
    participant JTGM as JumpToGoalStateMixin
    participant IP as InformedPolicy
    participant DL as InformedEnvironmentDataLoader
    participant GPOOS as get_perc_on_obj_semantic

    E ->>+ IP : __init__
    IP ->>+ BP : __init__
    BP ->>+ AS : sample
    AS -->>- BP : action
    BP ->> BP : get_random_action(action)
    activate BP
        loop
            alt if rand() < switch_frequency:
                BP ->>+ AS : sample
                AS -->>- BP : action
            end
            break if action != agent_id+".set_agent_pose" and action != agent_id+".set_sensor_rotation"
                BP -->> BP : action
            end
        end
    deactivate BP
    BP ->> BP : create_action_name_to_sample_fn
    BP ->> BP : get_next_amount(action)
    alt if file_names_per_episode is not None
        BP ->> BP : is_predefined = True
    end
    alt if file_name is not None
        BP ->>+ RAF : __call__(file_name)
        RAF -->>- BP : action_list, amount_list
        BP ->> BP : is_predefined = True
    end
    BP -->>- IP : 
    alt use_goal_state_driven_actions
        IP ->>+ JTGM : __init__
        JTGM -->>- IP : 
    end
    IP -->>- E : 

    create participant PO as processed_observations
    E ->> PO : 

    E ->>+ DL : pre_episode
    DL ->>+ IP : pre_episode
    alt use_goal_state_driven_actions
        IP ->>+ JTGM : pre_episode
        JTGM ->> JTGM : set_driving_goal_state(None)
        JTGM -->>- IP : 
    end
    IP ->>+ BP : pre_episode
    BP -->>- IP : 
    IP -->>- DL : 
    DL ->> DL : reset_agent
    activate DL
        DL ->>+ Dataset : reset
        Dataset -->>- DL : observation, state
        DL ->> BP : state=state
        DL ->> BP : state[agent_id]["motor_only_step"]=False
        DL ->>+ Dataset : env._agents[0].action_space_type
        Dataset -->>- DL : action_space_type
        alt not action_space_type == "surface_agent"
            DL ->> DL : get_good_view("view_finder")
            activate DL
                DL -->> DL : See details of get_good_view below within __next__ call
            deactivate DL
        end
    deactivate DL
    DL -->>- E : 

    E ->>+ DL : __iter__
    DL -->>- E : 

    E ->>+ DL : __next__
    alt _counter=0
        DL ->> DL : first_step
        activate DL
            DL ->> BP : state[agent_id]["motor_only_step"]=isinstance(MotorSystem, SurfacePolicy)
            DL -->> E : i, observation
        deactivate DL
    else
        DL ->>+ IP : use_goal_state_driven_actions
        IP -->>- DL : use_goal_state_driven_actions?
        DL ->>+ JTGM : driving_goal_state
        JTGM -->>- DL : driving_goal_state?
        alt use_goal_state_driven_actions and driving_goal_state is not None
            DL ->> DL : execute_jump_attempt
            activate DL
                DL ->>+ BP : state
                BP -->>- DL : state
                DL ->>+ JTGM : derive_habitat_goal_state
                alt driving_goal_state is not None
                    JTGM ->> JTGM : set_driving_goal_state(None)
                    JTGM -->> DL : target_loc, target_quat
                else
                    JTGM -->> DL : None, None
                end
                deactivate JTGM
                DL ->>+ Dataset : __getitem__(agent_id + ".set_agent_pose", target_amount)
                Dataset -->>- DL : observation, state
                DL ->> BP : state=state
                DL ->>+ Dataset : __getitem__(agent_id + ".set_sensor_rotation", quaternion.one)
                Dataset -->>- DL : observation, state
                DL ->> BP : state=state
                DL ->>+ IP : get_depth_at_center(observation, ...)
                IP -->>- DL : 
                alt depth_at_center < 1.0
                    DL ->> DL : handle_successful_jump
                    activate DL
                        DL ->> DL : get_good_view("view_finder")
                        activate DL
                            alt num_distractors > 0
                                DL ->>+ IP : search_for_object
                                IP ->>+ BP : get_agent_state
                                BP -->>- IP : state
                                IP -->>- DL : actions, amounts, on_object
                                alt not on_object
                                    loop actions, amounts
                                        DL ->>+ Dataset : __getitem__(action, amount)
                                        Dataset -->>- DL : observation, state
                                        DL ->> BP : state=state
                                    end
                                end
                            end
                            DL ->>+ IP : move_close_enough
                            IP ->>+ GPOOS : __call__(view, target_semantic_id)
                            GPOOS -->>- IP : perc_on_target_obj
                            IP -->>- DL : action, amount, close_enough
                            loop while not close_enough
                                DL ->>+ Dataset : __getitem__(action, amount)
                                Dataset -->>- DL : observation, state
                                DL ->> BP : state=state
                                DL ->>+ IP : move_close_enough
                                IP ->>+ GPOOS : __call__(view, target_semantic_id)
                                GPOOS -->>- IP : perc_on_target_obj
                                IP -->>- DL : action, amount, close_enough
                            end
                            DL ->>+ IP : search_for_object
                            IP ->>+ BP : get_agent_state
                            BP -->>- IP : state
                            IP -->>- DL : actions, amounts, on_object
                            alt not on_object
                                loop actions, amounts
                                    DL ->>+ Dataset : __getitem__(action, amount)
                                    Dataset -->>- DL : observation, state
                                    DL ->> BP : state=state
                                end
                            end
                        deactivate DL
                    deactivate DL
                else
                    DL ->> DL : handle_failed_jump
                    activate DL
                        DL ->>+ Dataset : __getitem__(agent_id + ".set_agent_pose", target_amount)
                        Dataset -->>- DL : observation, state
                        DL ->> BP : state=state
                        DL ->>+ Dataset : __getitem__(agent_id + ".set_sensor_rotation", quaternion.one)
                        Dataset -->>- DL : observation, state
                        DL ->> BP : state=state
                        DL ->>+ BP : state
                        BP -->>- DL : state
                    deactivate DL
                end
                DL ->> BP : state[agent_id]["motor_only_step"]=True
                DL ->>+ BP : action
                BP -->>- DL : action
                DL ->>+ IP : post_action(action, 0)
                IP -->>- DL : 
                DL -->> E : i, observation
            deactivate DL
        else
            DL ->>+ MS : __call__
            alt is_predefined
                MS ->>+ BP : predefined_call
                BP -->>- MS : action, amount
            else
                MS ->>+ IP : dynamic_call
                IP ->>+ BP : last_action
                BP -->>- IP : last_action, last_amount
                IP ->>+ PO : get_on_object
                PO -->>- IP : on_object?
                alt not on_object
                    IP -->> MS : last_action, -last_amount
                else
                    IP ->>+ BP : dynamic_call
                    BP ->> BP : get_random_action(action)
                    BP ->> BP : get_next_amount(action)
                    BP -->>- IP : action, amount
                    IP -->> MS : action, amount
                end
                deactivate IP
            end
            MS ->>+ IP : post_action(action, amount)
            IP -->>- MS : 
            MS -->>- DL : action, amount
            alt "orient" in action or "tangentially" in action
                DL ->> DL : update_habitat_sim_constraint
                activate DL
                    DL --x CRASH : get_next_constraint
                deactivate DL
            else
                DL ->>+ Dataset : __getitem__(action, amount)
                Dataset -->>- DL : observation, state
                DL ->> BP : state=state
                DL ->> BP : state[agent_id]["motor_only_step"]=True|False
            end
            DL -->>- E : i, observation
        end
    end

    E ->>+ DL : post_episode
    DL ->>+ BP : post_episode
    alt file_names_per_episode is not None
        alt episode_count in file_names_per_episode
            BP ->>+ RAF : __call__(file_name)
            RAF -->>- BP : action_list, amount_list
        end
    end
    BP -->>- DL : 
    DL -->>- E : 
Loading

InformedEnvironmentDataLoader + NaiveScanPolicy

sequenceDiagram
    participant E as entrypoint
    participant AS as ActionSpace
    participant MS as MotorSystem
    participant BP as BasePolicy
    participant RAF as read_action_file
    participant JTGM as JumpToGoalStateMixin
    participant IP as InformedPolicy
    participant NSP as NaiveScanPolicy
    participant DL as InformedEnvironmentDataLoader
    participant GPOOS as get_perc_on_obj_semantic

    E ->>+ NSP : __init__
    NSP ->>+ IP : __init__
    IP ->>+ BP : __init__
    BP ->>+ AS : sample
    AS -->>- BP : action
    BP ->> BP : get_random_action(action)
    activate BP
        loop
            alt if rand() < switch_frequency:
                BP ->>+ AS : sample
                AS -->>- BP : action
            end
            break if action != agent_id+".set_agent_pose" and action != agent_id+".set_sensor_rotation"
                BP -->> BP : action
            end
        end
    deactivate BP
    BP ->> BP : create_action_name_to_sample_fn
    BP ->> BP : get_next_amount(action)
    alt if file_names_per_episode is not None
        BP ->> BP : is_predefined = True
    end
    alt if file_name is not None
        BP ->>+ RAF : __call__(file_name)
        RAF -->>- BP : action_list, amount_list
        BP ->> BP : is_predefined = True
    end
    BP -->>- IP : 
    alt use_goal_state_driven_actions
        IP ->>+ JTGM : __init__
        JTGM -->>- IP : 
    end
    IP -->>- NSP : 
    NSP -->>- E : 

    create participant PO as processed_observations
    E ->> PO : 

    E ->>+ DL : pre_episode
    DL ->>+ NSP : pre_episode
    NSP ->>+ IP : pre_episode
    alt use_goal_state_driven_actions
        IP ->>+ JTGM : pre_episode
        JTGM ->> JTGM : set_driving_goal_state(None)
        JTGM -->>- IP : 
    end
    IP ->>+ BP : pre_episode
    BP -->>- IP : 
    IP -->>- NSP : 
    NSP -->>- DL : 
    DL ->> DL : reset_agent
    activate DL
        DL ->>+ Dataset : reset
        Dataset -->>- DL : observation, state
        DL ->> BP : state=state
        DL ->> BP : state[agent_id]["motor_only_step"]=False
        DL ->>+ Dataset : env._agents[0].action_space_type
        Dataset -->>- DL : action_space_type
        alt not action_space_type == "surface_agent"
            DL ->> DL : get_good_view("view_finder")
            activate DL
                DL -->> DL : See details of get_good_view below within __next__ call
            deactivate DL
        end
    deactivate DL
    DL -->>- E : 

    E ->>+ DL : __iter__
    DL -->>- E : 

    E ->>+ DL : __next__
    alt _counter=0
        DL ->> DL : first_step
        activate DL
            DL ->> BP : state[agent_id]["motor_only_step"]=isinstance(MotorSystem, SurfacePolicy)
            DL -->> E : i, observation
        deactivate DL
    else
        DL ->>+ IP : use_goal_state_driven_actions
        IP -->>- DL : use_goal_state_driven_actions?
        DL ->>+ JTGM : driving_goal_state
        JTGM -->>- DL : driving_goal_state?
        alt use_goal_state_driven_actions and driving_goal_state is not None
            DL ->> DL : execute_jump_attempt
            activate DL
                DL ->>+ BP : state
                BP -->>- DL : state
                DL ->>+ JTGM : derive_habitat_goal_state
                alt driving_goal_state is not None
                    JTGM ->> JTGM : set_driving_goal_state(None)
                    JTGM -->> DL : target_loc, target_quat
                else
                    JTGM -->> DL : None, None
                end
                deactivate JTGM
                DL ->>+ Dataset : __getitem__(agent_id + ".set_agent_pose", target_amount)
                Dataset -->>- DL : observation, state
                DL ->> BP : state=state
                DL ->>+ Dataset : __getitem__(agent_id + ".set_sensor_rotation", quaternion.one)
                Dataset -->>- DL : observation, state
                DL ->> BP : state=state
                DL ->>+ IP : get_depth_at_center(observation, ...)
                IP -->>- DL : 
                alt depth_at_center < 1.0
                    DL ->> DL : handle_successful_jump
                    activate DL
                        DL ->> DL : get_good_view("view_finder")
                        activate DL
                            alt num_distractors > 0
                                DL ->>+ IP : search_for_object
                                IP ->>+ BP : get_agent_state
                                BP -->>- IP : state
                                IP -->>- DL : actions, amounts, on_object
                                alt not on_object
                                    loop actions, amounts
                                        DL ->>+ Dataset : __getitem__(action, amount)
                                        Dataset -->>- DL : observation, state
                                        DL ->> BP : state=state
                                    end
                                end
                            end
                            DL ->>+ IP : move_close_enough
                            IP ->>+ GPOOS : __call__(view, target_semantic_id)
                            GPOOS -->>- IP : perc_on_target_obj
                            IP -->>- DL : action, amount, close_enough
                            loop while not close_enough
                                DL ->>+ Dataset : __getitem__(action, amount)
                                Dataset -->>- DL : observation, state
                                DL ->> BP : state=state
                                DL ->>+ IP : move_close_enough
                                IP ->>+ GPOOS : __call__(view, target_semantic_id)
                                GPOOS -->>- IP : perc_on_target_obj
                                IP -->>- DL : action, amount, close_enough
                            end
                            DL ->>+ IP : search_for_object
                            IP ->>+ BP : get_agent_state
                            BP -->>- IP : state
                            IP -->>- DL : actions, amounts, on_object
                            alt not on_object
                                loop actions, amounts
                                    DL ->>+ Dataset : __getitem__(action, amount)
                                    Dataset -->>- DL : observation, state
                                    DL ->> BP : state=state
                                end
                            end
                        deactivate DL
                    deactivate DL
                else
                    DL ->> DL : handle_failed_jump
                    activate DL
                        DL ->>+ Dataset : __getitem__(agent_id + ".set_agent_pose", target_amount)
                        Dataset -->>- DL : observation, state
                        DL ->> BP : state=state
                        DL ->>+ Dataset : __getitem__(agent_id + ".set_sensor_rotation", quaternion.one)
                        Dataset -->>- DL : observation, state
                        DL ->> BP : state=state
                        DL ->>+ BP : state
                        BP -->>- DL : state
                    deactivate DL
                end
                DL ->> BP : state[agent_id]["motor_only_step"]=True
                DL ->>+ BP : action
                BP -->>- DL : action
                DL ->>+ IP : post_action(action, 0)
                IP -->>- DL : 
                DL -->> E : i, observation
            deactivate DL
        else
            DL ->>+ MS : __call__
            alt is_predefined
                MS ->>+ BP : predefined_call
                BP -->>- MS : action, amount
            else
                MS ->>+ NSP : dynamic_call
                alt steps_per_action * fixed_amount >= 90
                    NSP -->> DL : StopIteration
                else
                    NSP ->> NSP : check_cycle_action
                    NSP -->> MS : action, amount
                end
                deactivate NSP
            end
            MS ->>+ IP : post_action(action, amount)
            IP -->>- MS : 
            MS -->>- DL : action, amount
            alt "orient" in action or "tangentially" in action
                DL ->> DL : update_habitat_sim_constraint
                activate DL
                    DL --x CRASH : get_next_constraint
                deactivate DL
            else
                DL ->>+ Dataset : __getitem__(action, amount)
                Dataset -->>- DL : observation, state
                DL ->> BP : state=state
                DL ->> BP : state[agent_id]["motor_only_step"]=True|False
            end
            DL -->>- E : i, observation
        end
    end

    E ->>+ DL : post_episode
    DL ->>+ BP : post_episode
    alt file_names_per_episode is not None
        alt episode_count in file_names_per_episode
            BP ->>+ RAF : __call__(file_name)
            RAF -->>- BP : action_list, amount_list
        end
    end
    BP -->>- DL : 
    DL -->>- E : 
Loading

InformedEnvironmentDataLoader + SurfacePolicy

sequenceDiagram
    participant E as entrypoint
    participant AS as ActionSpace
    participant MS as MotorSystem
    participant BP as BasePolicy
    participant RAF as read_action_file
    participant JTGM as JumpToGoalStateMixin
    participant IP as InformedPolicy
    participant SP as SurfacePolicy
    participant DL as InformedEnvironmentDataLoader
    participant GPOOS as get_perc_on_obj_semantic

    E ->>+ SP : __init__
    SP ->>+ IP : __init__
    IP ->>+ BP : __init__
    BP ->>+ AS : sample
    AS -->>- BP : action
    BP ->> BP : get_random_action(action)
    activate BP
        loop
            alt if rand() < switch_frequency:
                BP ->>+ AS : sample
                AS -->>- BP : action
            end
            break if action != agent_id+".set_agent_pose" and action != agent_id+".set_sensor_rotation"
                BP -->> BP : action
            end
        end
    deactivate BP
    BP ->> BP : create_action_name_to_sample_fn
    BP ->>+ SP : get_next_amount(action)
    alt no processed_observations
        SP -->> SP : None
    end
    SP -->>- BP : 
    alt if file_names_per_episode is not None
        BP ->> BP : is_predefined = True
    end
    alt if file_name is not None
        BP ->>+ RAF : __call__(file_name)
        RAF -->>- BP : action_list, amount_list
        BP ->> BP : is_predefined = True
    end
    BP -->>- IP : 
    alt use_goal_state_driven_actions
        IP ->>+ JTGM : __init__
        JTGM -->>- IP : 
    end
    IP -->>- SP : 
    SP -->>- E : 

    create participant PO as processed_observations
    E ->> PO : 

    E ->>+ DL : pre_episode
    DL ->>+ IP : pre_episode
    alt use_goal_state_driven_actions
        IP ->>+ JTGM : pre_episode
        JTGM ->> JTGM : set_driving_goal_state(None)
        JTGM -->>- IP : 
    end
    IP ->>+ BP : pre_episode
    BP -->>- IP : 
    IP -->>- DL : 
    DL ->> DL : reset_agent
    activate DL
        DL ->>+ Dataset : reset
        Dataset -->>- DL : observation, state
        DL ->> BP : state=state
        DL ->> BP : state[agent_id]["motor_only_step"]=False
        DL ->>+ Dataset : env._agents[0].action_space_type
        Dataset -->>- DL : action_space_type
        alt not action_space_type == "surface_agent"
            DL ->> DL : get_good_view("view_finder")
            activate DL
                alt num_distractors > 0
                    DL ->>+ IP : search_for_object
                    IP ->>+ BP : get_agent_state
                    BP -->>- IP : state
                    IP -->>- DL : actions, amounts, on_object
                    alt not on_object
                        loop actions, amounts
                            DL ->>+ Dataset : __getitem__(action, amount)
                            Dataset -->>- DL : observation, state
                            DL ->> BP : state=state
                        end
                    end
                end
                DL ->>+ IP : move_close_enough
                IP ->>+ GPOOS : __call__(view, target_semantic_id)
                GPOOS -->>- IP : perc_on_target_obj
                IP -->>- DL : action, amount, close_enough
                loop while not close_enough
                    DL ->>+ Dataset : __getitem__(action, amount)
                    Dataset -->>- DL : observation, state
                    DL ->> BP : state=state
                    DL ->>+ IP : move_close_enough
                    IP ->>+ GPOOS : __call__(view, target_semantic_id)
                    GPOOS -->>- IP : perc_on_target_obj
                    IP -->>- DL : action, amount, close_enough
                end
                DL ->>+ IP : search_for_object
                IP ->>+ BP : get_agent_state
                BP -->>- IP : state
                IP -->>- DL : actions, amounts, on_object
                alt not on_object
                    loop actions, amounts
                        DL ->>+ Dataset : __getitem__(action, amount)
                        Dataset -->>- DL : observation, state
                        DL ->> BP : state=state
                    end
                end
            deactivate DL
        end
    deactivate DL
    DL -->>- E : 

    E ->>+ DL : __iter__
    DL -->>- E : 

    E ->>+ DL : __next__
    alt _counter=0
        DL ->> DL : first_step
        activate DL
            DL ->> BP : state[agent_id]["motor_only_step"]=isinstance(MotorSystem, SurfacePolicy)
            DL -->> E : i, observation
        deactivate DL
    else
        DL ->>+ IP : use_goal_state_driven_actions
        IP -->>- DL : use_goal_state_driven_actions?
        DL ->>+ JTGM : driving_goal_state
        JTGM -->>- DL : driving_goal_state?
        alt use_goal_state_driven_actions and driving_goal_state is not None
            DL ->> DL : execute_jump_attempt
            activate DL
                DL ->>+ BP : state
                BP -->>- DL : state
                DL ->>+ JTGM : derive_habitat_goal_state
                alt driving_goal_state is not None
                    JTGM ->> JTGM : set_driving_goal_state(None)
                    JTGM -->> DL : target_loc, target_quat
                else
                    JTGM -->> DL : None, None
                end
                deactivate JTGM
                DL ->>+ Dataset : __getitem__(agent_id + ".set_agent_pose", target_amount)
                Dataset -->>- DL : observation, state
                DL ->> BP : state=state
                DL ->>+ Dataset : __getitem__(agent_id + ".set_sensor_rotation", quaternion.one)
                Dataset -->>- DL : observation, state
                DL ->> BP : state=state
                DL ->>+ IP : get_depth_at_center(observation, ...)
                IP -->>- DL : 
                alt depth_at_center < 1.0
                    DL ->> DL : handle_successful_jump
                    activate DL
                        DL ->> BP : action=agent_id+".move_tangentially"
                        DL --x CRASH : action_details.append
                    deactivate DL
                else
                    DL ->> DL : handle_failed_jump
                    activate DL
                        DL ->>+ Dataset: __getitem__(agent_id + ".set_agent_pose", target_amount)
                        Dataset -->>- DL : observation, state
                        DL ->> BP : state=state
                        DL ->>+ Dataset: __getitem__(agent_id + ".set_sensor_rotation", quaternion.one)
                        Dataset -->>- DL : observation, state
                        DL ->> BP : state=state
                        DL ->>+ BP : state
                        BP -->>- DL : state
                    deactivate DL
                end
                DL ->> BP : state[agent_id]["motor_only_step"]=True
                DL ->>+ BP : action
                BP -->>- DL : action
                DL ->>+ IP : post_action(action, 0)
                IP -->>- DL : 
                DL -->> E : i, observation
            deactivate DL
        else
            DL ->>+ MS : __call__
            alt is_predefined
                MS ->>+ BP : predefined_call
                BP -->>- MS : action, amount
            else
                MS ->>+ SP : dynamic_call
                SP ->>+ PO : get_feature_by_name("object_coverage")
                PO -->>- SP : object_coverage
                alt object_coverage < 0.1
                    SP -->> MS : None, None
                else action is None
                    SP ->> SP : action=agent_id+".move_forward"
                    SP ->> SP : get_next_action
                    activate SP
                        SP ->>+ BP : last_action
                        BP -->>- SP : last_action, last_amount
                        alt "move_forward" in last_action
                            SP -->> SP : "orient_horizontal"
                        else "orient_horizontal" in last_action
                            SP -->> SP : "orient_vertical"
                        else "orient_vertical" in last_action
                            SP -->> SP : "move_tangentially"
                        else "move_tangentially" in last_action
                            SP ->>+ PO : get_on_object
                            PO -->>- SP : on_object?
                            alt if not on_object
                                SP -->> SP : "orient_horizontal"
                            else
                                SP -->> SP : "move_forward"
                            end
                        end
                    deactivate SP
                    SP ->> SP : get_next_amount(action)
                    activate SP
                        alt no processed_observations
                            SP -->> SP : None
                        end
                        alt "move_tangentially" in action
                            SP ->>+ PO : get_feature_by_name("object_coverage")
                            PO -->>- SP : object_coverage
                            alt object_coverage < 0.2
                                SP ->>+ PO : get_feature_by_name("object_coverage")
                                PO -->>- SP : object_coverage
                                SP -->> SP : amount / (4 / object_coverage)
                            else object_coverage < 0.75
                                SP -->> SP : amount / 4
                            else 
                                SP -->> SP : amount
                            end
                        end
                    deactivate SP
                    SP -->> MS : action, amount
                end
                deactivate SP
            end
            MS ->>+ IP : post_action(action, amount)
            IP -->>- MS : 
            MS -->>- DL : action, amount
            alt action is None
                DL ->>+ SP : touch_object
                SP ->>+ IP : get_depth_at_center
                IP -->>- SP : 
                SP -->>- DL : action, amount, constraint
                DL ->> Dataset : env._env._sim.agents[0].agent_config.action_space[self._action].actuation.constraint=constraint
                DL ->> BP : state[agent_id]["motor_only_step"]=True
            else "orient" in action or "tangentially" in action
                DL ->> DL : update_habitat_sim_constraint
                activate DL
                    DL ->>+ SP : get_next_constraint
                    SP ->>+ PO : non_morphological_features["mean_depth"]
                    PO -->>- SP : depth
                    SP -->>- DL : constraint
                    DL ->> Dataset : env._env._sim.agents[0].agent_config.action_space[self._action].actuation.constraint=constraint
                deactivate DL
            else
                DL ->>+ Dataset : __getitem__(action, amount)
                Dataset -->>- DL : observation, state
                DL ->> BP : state=state
                DL ->> BP : state[agent_id]["motor_only_step"]=True|False
            end
            DL -->>- E : i, observation
        end
    end

    E ->>+ DL : post_episode
    DL ->>+ BP : post_episode
    alt file_names_per_episode is not None
        alt episode_count in file_names_per_episode
            BP ->>+ RAF : __call__(file_name)
            RAF -->>- BP : action_list, amount_list
        end
    end
    BP -->>- DL : 
    DL -->>- E : 
Loading

InformedEnvironmentDataLoader + SurfacePolicyCurvatureInformed

sequenceDiagram
    participant E as entrypoint
    participant AS as ActionSpace
    participant MS as MotorSystem
    participant BP as BasePolicy
    participant RAF as read_action_file
    participant JTGM as JumpToGoalStateMixin
    participant IP as InformedPolicy
    participant SP as SurfacePolicy
    participant SPCI as SurfacePolicyCurvatureInformed
    participant DL as InformedEnvironmentDataLoader
    participant GPOOS as get_perc_on_obj_semantic

    E ->>+ SPCI : __init__
    SPCI ->>+ SP : __init__
    SP ->>+ IP : __init__
    IP ->>+ BP : __init__
    BP ->>+ AS : sample
    AS -->>- BP : action
    BP ->> BP : get_random_action(action)
    activate BP
        loop
            alt if rand() < switch_frequency:
                BP ->>+ AS : sample
                AS -->>- BP : action
            end
            break if action != agent_id+".set_agent_pose" and action != agent_id+".set_sensor_rotation"
                BP -->> BP : action
            end
        end
    deactivate BP
    BP ->> BP : create_action_name_to_sample_fn
    BP ->>+ SP : get_next_amount(action)
    alt no processed_observations
        SP -->> SP : None
    end
    SP -->>- BP : 
    alt if file_names_per_episode is not None
        BP ->> BP : is_predefined = True
    end
    alt if file_name is not None
        BP ->>+ RAF : __call__(file_name)
        RAF -->>- BP : action_list, amount_list
        BP ->> BP : is_predefined = True
    end
    BP -->>- IP : 
    alt use_goal_state_driven_actions
        IP ->>+ JTGM : __init__
        JTGM -->>- IP : 
    end
    IP -->>- SP : 
    SP -->>- SPCI : 
    SPCI -->>- E : 

    create participant PO as processed_observations
    E ->> PO : 

    E ->>+ DL : pre_episode
    DL ->>+ SPCI : pre_episode
    SPCI ->>+ IP : pre_episode
    alt use_goal_state_driven_actions
        IP ->>+ JTGM : pre_episode
        JTGM ->> JTGM : set_driving_goal_state(None)
        JTGM -->>- IP : 
    end
    IP ->>+ BP : pre_episode
    BP -->>- IP : 
    IP -->>- SPCI : 
    SPCI -->>- DL : 
    DL ->> DL : reset_agent
    activate DL
        DL ->>+ Dataset : reset
        Dataset -->>- DL : observation, state
        DL ->> BP : state=state
        DL ->> BP : state[agent_id]["motor_only_step"]=False
        DL ->>+ Dataset : env._agents[0].action_space_type
        Dataset -->>- DL : action_space_type
        alt not action_space_type == "surface_agent"
            DL ->> DL : get_good_view("view_finder")
            activate DL
                alt num_distractors > 0
                    DL ->>+ IP : search_for_object
                    IP ->>+ BP : get_agent_state
                    BP -->>- IP : state
                    IP -->>- DL : actions, amounts, on_object
                    alt not on_object
                        loop actions, amounts
                            DL ->>+ Dataset : __getitem__(action, amount)
                            Dataset -->>- DL : observation, state
                            DL ->> BP : state=state
                        end
                    end
                end
                DL ->>+ IP : move_close_enough
                IP ->>+ GPOOS : __call__(view, target_semantic_id)
                GPOOS -->>- IP : perc_on_target_obj
                IP -->>- DL : action, amount, close_enough
                loop while not close_enough
                    DL ->>+ Dataset : __getitem__(action, amount)
                    Dataset -->>- DL : observation, state
                    DL ->> BP : state=state
                    DL ->>+ IP : move_close_enough
                    IP ->>+ GPOOS : __call__(view, target_semantic_id)
                    GPOOS -->>- IP : perc_on_target_obj
                    IP -->>- DL : action, amount, close_enough
                end
                DL ->>+ IP : search_for_object
                IP ->>+ BP : get_agent_state
                BP -->>- IP : state
                IP -->>- DL : actions, amounts, on_object
                alt not on_object
                    loop actions, amounts
                        DL ->>+ Dataset : __getitem__(action, amount)
                        Dataset -->>- DL : observation, state
                        DL ->> BP : state=state
                    end
                end
            deactivate DL
        end
    deactivate DL
    DL -->>- E : 

    E ->>+ DL : __iter__
    DL -->>- E : 

    E ->>+ DL : __next__
    alt _counter=0
        DL ->> DL : first_step
        activate DL
            DL ->> BP : state[agent_id]["motor_only_step"]=isinstance(MotorSystem, SurfacePolicy)
            DL -->> E : i, observation
        deactivate DL
    else
        DL ->>+ IP : use_goal_state_driven_actions
        IP -->>- DL : use_goal_state_driven_actions?
        DL ->>+ JTGM : driving_goal_state
        JTGM -->>- DL : driving_goal_state?
        alt use_goal_state_driven_actions and driving_goal_state is not None
            DL ->> DL : execute_jump_attempt
            activate DL
                DL ->>+ BP : state
                BP -->>- DL : state
                DL ->>+ JTGM : derive_habitat_goal_state
                alt driving_goal_state is not None
                    JTGM ->> JTGM : set_driving_goal_state(None)
                    JTGM -->> DL : target_loc, target_quat
                else
                    JTGM -->> DL : None, None
                end
                deactivate JTGM
                DL ->>+ Dataset : __getitem__(agent_id + ".set_agent_pose", target_amount)
                Dataset -->>- DL : observation, state
                DL ->> BP : state=state
                DL ->>+ Dataset : __getitem__(agent_id + ".set_sensor_rotation", quaternion.one)
                Dataset -->>- DL : observation, state
                DL ->> BP : state=state
                DL ->>+ IP : get_depth_at_center(observation, ...)
                IP -->>- DL : 
                alt depth_at_center < 1.0
                    DL ->> DL : handle_successful_jump
                    activate DL
                        DL ->> BP : action=agent_id+".move_tangentially"
                        DL ->> DL : action_details.append
                    deactivate DL
                else
                    DL ->> DL : handle_failed_jump
                    activate DL
                        DL ->>+ Dataset: __getitem__(agent_id + ".set_agent_pose", target_amount)
                        Dataset -->>- DL : observation, state
                        DL ->> BP : state=state
                        DL ->>+ Dataset: __getitem__(agent_id + ".set_sensor_rotation", quaternion.one)
                        Dataset -->>- DL : observation, state
                        DL ->> BP : state=state
                        DL ->>+ BP : state
                        BP -->>- DL : state
                    deactivate DL
                end
                DL ->> BP : state[agent_id]["motor_only_step"]=True
                DL ->>+ BP : action
                BP -->>- DL : action
                DL ->>+ IP : post_action(action, 0)
                IP -->>- DL : 
                DL -->> E : i, observation
            deactivate DL
        else
            DL ->>+ MS : __call__
            alt is_predefined
                MS ->>+ BP : predefined_call
                BP -->>- MS: action, amount
            else
                MS ->>+ SP : dynamic_call
                SP ->>+ PO : get_feature_by_name("object_coverage")
                PO -->>- SP : object_coverage
                alt object_coverage < 0.1
                    SP -->> MS : None, None
                else action is None
                    SP ->> SP : action=agent_id+".move_forward"
                    SP ->> SP : get_next_action
                    activate SP
                        SP ->>+ BP : last_action
                        BP -->>- SP : last_action, last_amount
                        alt "move_forward" in last_action
                            SP -->> SP : "orient_horizontal"
                        else "orient_horizontal" in last_action
                            SP -->> SP : "orient_vertical"
                        else "orient_vertical" in last_action
                            SP -->> SP : "move_tangentially"
                        else "move_tangentially" in last_action
                            SP ->>+ PO : get_on_object
                            PO -->>- SP : on_object?
                            alt if not on_object
                                SP -->> SP : "orient_horizontal"
                            else
                                SP -->> SP : "move_forward"
                            end
                        end
                    deactivate SP
                    SP ->> SP : get_next_amount(action)
                    activate SP
                        alt no processed_observations
                            SP -->> SP : None
                        end
                        alt "move_tangentially" in action
                            SP ->>+ PO : get_feature_by_name("object_coverage")
                            PO -->>- SP : object_coverage
                            alt object_coverage < 0.2
                                SP ->>+ PO : get_feature_by_name("object_coverage")
                                PO -->>- SP : object_coverage
                                SP -->> SP : amount / (4 / object_coverage)
                            else object_coverage < 0.75
                                SP -->> SP : amount / 4
                            else 
                                SP -->> SP : amount
                            end
                        end
                    deactivate SP
                    SP -->> MS: action, amount
                end
                deactivate SP
            end
            MS ->>+ IP : post_action(action, amount)
            IP -->>- MS : 
            MS -->>- DL : action, amount
            alt action is None
                DL ->>+ SP : touch_object
                SP ->>+ IP : get_depth_at_center
                IP -->>- SP : 
                SP -->>- DL : action, amount, constraint
                DL ->> Dataset: env._env._sim.agents[0].agent_config.action_space[self._action].actuation.constraint=constraint
                DL ->> BP : state[agent_id]["motor_only_step"]=True
            else "orient" in action or "tangentially" in action
                DL ->> DL : update_habitat_sim_constraint
                activate DL
                    DL ->>+ SPCI : get_next_constraint(action, amount)
                    alt "move_tangentially" in action
                        SPCI ->>+ PO : get_feature_by_name("pose_fully_defined")
                        PO -->>- SPCI : pose_fully_defined?
                        alt pose_fully_defined and ignoring_pc_counter >= min_general_steps
                            SPCI ->> SPCI : perform_pc_guided_step
                            activate SPCI
                                SPCI ->> SPCI : check_for_preference_change
                                SPCI ->> SPCI : determine_pc_for_use
                                activate SPCI
                                    SPCI ->>+ PO : get_feature_by_name("principal_curvatures")
                                    PO -->>- SPCI : principal_curvatures
                                deactivate SPCI
                                SPCI ->>+ PO : get_curvature_directions
                                PO -->>- SPCI : curvature_directions
                                SPCI ->> SPCI : get_inverse_agent_rot
                                alt int(np.argmax(np.abs(rotated_form))) == int(2)
                                    SPCI ->> SPCI : perform_standard_tang_step
                                end
                                SPCI ->> SPCI : update_tangential_reps
                                SPCI ->> SPCI : check_for_flipped_pc
                                SPCI ->> SPCI : avoid_revisiting_locations
                            deactivate SPCI
                        end
                    end
                    SPCI -->>- DL : constraint
                    DL ->> Dataset: env._env._sim.agents[0].agent_config.action_space[self._action].actuation.constraint=constraint
                deactivate DL
            else
                DL ->>+ Dataset: __getitem__(action, amount)
                Dataset -->>- DL : observation, state
                DL ->> BP : state=state
                DL ->> BP : state[agent_id]["motor_only_step"]=True|False
            end
            DL -->>- E : i, observation
        end
    end

    E ->>+ DL : post_episode
    DL ->>+ BP : post_episode
    alt file_names_per_episode is not None
        alt episode_count in file_names_per_episode
            BP ->>+ RAF : __call__(file_name)
            RAF -->>- BP : action_list, amount_list
        end
    end
    BP -->>- DL : 
    DL -->>- E : 
Loading

InformedEnvironmentDataLoader + TouchPolicy

NOTE: TouchPolicy's touch_object is never invoked because isinstance(TouchPolicy, SurfacePolicy) -> False

sequenceDiagram
    participant E as entrypoint
    participant AS as ActionSpace
    participant MS as MotorSystem
    participant BP as BasePolicy
    participant RAF as read_action_file
    participant JTGM as JumpToGoalStateMixin
    participant IP as InformedPolicy
    participant TP as TouchPolicy
    participant DL as InformedEnvironmentDataLoader
    participant GPOOS as get_perc_on_obj_semantic

    E ->>+ TP : __init__
    TP ->>+ IP : __init__
    IP ->>+ BP : __init__
    BP ->>+ AS : sample
    AS -->>- BP : action
    BP ->> BP : get_random_action(action)
    activate BP
        loop
            alt if rand() < switch_frequency:
                BP ->>+ AS : sample
                AS -->>- BP : action
            end
            break if action != agent_id+".set_agent_pose" and action != agent_id+".set_sensor_rotation"
                BP -->> BP : action
            end
        end
    deactivate BP
    BP ->> BP : create_action_name_to_sample_fn
    BP ->>+ TP : get_next_amount(action)
    TP -->>- BP : None
    alt if file_names_per_episode is not None
        BP ->> BP : is_predefined = True
    end
    alt if file_name is not None
        BP ->>+ RAF : __call__(file_name)
        RAF -->>- BP : action_list, amount_list
        BP ->> BP : is_predefined = True
    end
    BP -->>- IP : 
    alt use_goal_state_driven_actions
        IP ->>+ JTGM : __init__
        JTGM -->>- IP : 
    end
    IP -->>- TP : 
    TP -->>- E : 

    create participant PO as processed_observations
    E ->> PO : 

    E ->>+ DL : pre_episode
    DL ->>+ TP : pre_episode
    TP ->>+ IP : pre_episode
    alt use_goal_state_driven_actions
        IP ->>+ JTGM : pre_episode
        JTGM ->> JTGM : set_driving_goal_state(None)
        JTGM -->>- IP : 
    end
    IP ->>+ BP : pre_episode
    BP -->>- IP : 
    IP -->>- TP : 
    TP -->>- DL : 
    DL ->> DL : reset_agent
    activate DL
        DL ->>+ Dataset : reset
        Dataset -->>- DL : observation, state
        DL ->> BP : state=state
        DL ->> BP : state[agent_id]["motor_only_step"]=False
        DL ->>+ Dataset : env._agents[0].action_space_type
        Dataset -->>- DL : action_space_type
        alt not action_space_type == "surface_agent"
            DL ->> DL : get_good_view("view_finder")
            activate DL
                DL -->> DL : See details of get_good_view below within __next__ call
            deactivate DL
        end
    deactivate DL
    DL -->>- E : 

    E ->>+ DL : __iter__
    DL -->>- E : 

    E ->>+ DL : __next__
    alt _counter=0
        DL ->> DL : first_step
        activate DL
            DL ->> BP : state[agent_id]["motor_only_step"]=isinstance(MotorSystem, SurfacePolicy)
            DL -->> E : i, observation
        deactivate DL
    else
        DL ->>+ IP : use_goal_state_driven_actions
        IP -->>- DL : use_goal_state_driven_actions?
        DL ->>+ JTGM : driving_goal_state
        JTGM -->>- DL : driving_goal_state?
        alt use_goal_state_driven_actions and driving_goal_state is not None
            DL ->> DL : execute_jump_attempt
            activate DL
                DL ->>+ BP : state
                BP -->>- DL : state
                DL ->>+ JTGM : derive_habitat_goal_state
                alt driving_goal_state is not None
                    JTGM ->> JTGM : set_driving_goal_state(None)
                    JTGM -->> DL : target_loc, target_quat
                else
                    JTGM -->> DL : None, None
                end
                deactivate JTGM
                DL ->>+ Dataset : __getitem__(agent_id + ".set_agent_pose", target_amount)
                Dataset -->>- DL : observation, state
                DL ->> BP : state=state
                DL ->>+ Dataset : __getitem__(agent_id + ".set_sensor_rotation", quaternion.one)
                Dataset -->>- DL : observation, state
                DL ->> BP : state=state
                DL ->>+ TP : get_depth_at_center(observation, ...)
                TP -->>- DL : 
                alt depth_at_center < 1.0
                    DL ->> DL : handle_successful_jump
                    activate DL
                        DL ->> DL : get_good_view("view_finder")
                        activate DL
                            alt num_distractors > 0
                                DL ->>+ IP : search_for_object
                                IP ->>+ BP : get_agent_state
                                BP -->>- IP : state
                                IP -->>- DL : actions, amounts, on_object
                                alt not on_object
                                    loop actions, amounts
                                        DL ->>+ Dataset : __getitem__(action, amount)
                                        Dataset -->>- DL : observation, state
                                        DL ->> BP : state=state
                                    end
                                end
                            end
                            DL ->>+ IP : move_close_enough
                            IP ->>+ GPOOS : __call__(view, target_semantic_id)
                            GPOOS -->>- IP : perc_on_target_obj
                            IP -->>- DL : action, amount, close_enough
                            loop while not close_enough
                                DL ->>+ Dataset : __getitem__(action, amount)
                                Dataset -->>- DL : observation, state
                                DL ->> BP : state=state
                                DL ->>+ IP : move_close_enough
                                IP ->>+ GPOOS : __call__(view, target_semantic_id)
                                GPOOS -->>- IP : perc_on_target_obj
                                IP -->>- DL : action, amount, close_enough
                            end
                            DL ->>+ IP : search_for_object
                            IP ->>+ BP : get_agent_state
                            BP -->>- IP : state
                            IP -->>- DL : actions, amounts, on_object
                            alt not on_object
                                loop actions, amounts
                                    DL ->>+ Dataset : __getitem__(action, amount)
                                    Dataset -->>- DL : observation, state
                                    DL ->> BP : state=state
                                end
                            end
                        deactivate DL
                    deactivate DL
                else
                    DL ->> DL : handle_failed_jump
                    activate DL
                        DL ->>+ Dataset: __getitem__(agent_id + ".set_agent_pose", target_amount)
                        Dataset -->>- DL : observation, state
                        DL ->> BP : state=state
                        DL ->>+ Dataset: __getitem__(agent_id + ".set_sensor_rotation", quaternion.one)
                        Dataset -->>- DL : observation, state
                        DL ->> BP : state=state
                        DL ->>+ BP : state
                        BP -->>- DL : state
                    deactivate DL
                end
                DL ->> BP : state[agent_id]["motor_only_step"]=True
                DL ->>+ BP : action
                BP -->>- DL : action
                DL ->>+ IP : post_action(action, 0)
                IP -->>- DL : 
                DL -->> E : i, observation
            deactivate DL
        else
            DL ->>+ TP : __call__
            TP ->> TP : get_next_action
            activate TP
                TP ->>+ BP : last_action
                BP -->>- TP : action, amount
                alt "move_tangentially" in action
                    TP ->>+ PO : ["features"]["on_object"]
                    PO -->>- TP : on_object?
                end
            deactivate TP
            TP ->> TP : get_next_amount(action)
            activate TP
                alt
                    TP ->>+ PO : features
                    PO -->>- TP : 
                end
            deactivate TP
            TP ->>+ IP : post_action(action, amount)
            IP -->>- TP : 
            TP -->>- DL : action, amount
            alt "orient" in action or "tangentially" in action
                DL ->> DL : update_habitat_sim_constraint
                activate DL
                    DL ->>+ TP : get_next_constraint
                    TP ->>+ PO : ["features"]["depth"]
                    PO -->>- TP : depth
                    TP -->>- DL : constraint
                    DL ->> Dataset : env._env._sim.agents[0].agent_config.action_space[self._action].actuation.constraint=constraint
                deactivate DL
            else
                DL ->>+ Dataset: __getitem__(action, amount)
                Dataset -->>- DL : observation, state
                DL ->> BP : state=state
                DL ->> BP : state[agent_id]["motor_only_step"]=True|False
            end
            DL -->>- E : i, observation
        end
    end

    E ->>+ DL : post_episode
    DL ->>+ BP : post_episode
    alt file_names_per_episode is not None
        alt episode_count in file_names_per_episode
            BP ->>+ RAF : __call__(file_name)
            RAF -->>- BP : action_list, amount_list
        end
    end
    BP -->>- DL : 
    DL -->>- E : 
Loading

InformedPolicy

sequenceDiagram
    participant E as entrypoint
    participant AS as ActionSpace
    participant MS as MotorSystem
    participant BP as BasePolicy
    participant RAF as read_action_file
    participant JTGM as JumpToGoalStateMixin
    participant IP as InformedPolicy

    E ->>+ IP : __init__
    IP ->>+ BP : __init__
    BP ->>+ AS : sample
    AS -->>- BP : action
    BP ->> BP : get_random_action(action)
    activate BP
        loop
            alt if rand() < switch_frequency:
                BP ->>+ AS : sample
                AS -->>- BP : action
            end
            break if action != agent_id+".set_agent_pose" and action != agent_id+".set_sensor_rotation"
                BP -->> BP : action
            end
        end
    deactivate BP
    BP ->> BP : create_action_name_to_sample_fn
    BP ->> BP : get_next_amount(action)
    alt if file_names_per_episode is not None
        BP ->> BP : is_predefined = True
    end
    alt if file_name is not None
        BP ->>+ RAF : __call__(file_name)
        RAF -->>- BP : action_list, amount_list
        BP ->> BP : is_predefined = True
    end
    BP -->>- IP : 
    alt use_goal_state_driven_actions
        IP ->>+ JTGM : __init__
        JTGM -->>- IP : 
    end
    IP -->>- E : 

    create participant PO as processed_observations
    E ->> PO : 

    E ->>+ IP : pre_episode
    alt use_goal_state_driven_actions
        IP ->>+ JTGM : pre_episode
        JTGM ->> JTGM : set_driving_goal_state(None)
        JTGM -->>- IP : 
    end
    IP ->>+ BP : pre_episode
    BP -->>- IP : 
    IP -->>- E : 

    E ->>+ MS : __call__
    alt is_predefined
        MS ->>+ BP : predefined_call
        BP -->>- MS: action, amount
    else
        MS ->>+ IP : dynamic_call
        IP ->>+ BP : last_action
        BP -->>- IP : last_action, last_amount
        IP ->>+ PO : get_on_object
        PO -->>- IP : on_object?
        alt not on_object
            IP -->> MS : last_action, -last_amount
        else
            IP ->>+ BP : dynamic_call
            BP ->> BP : get_random_action(action)
            BP ->> BP : get_next_amount(action)
            BP -->>- IP : action, amount
            IP -->> MS: action, amount
        end
        deactivate IP
    end
    MS ->>+ IP : post_action(action, amount)
    IP -->>- MS : 
    MS -->>- E : action, amount

    E ->>+ BP : post_episode
    alt file_names_per_episode is not None
        alt episode_count in file_names_per_episode
            BP ->>+ RAF : __call__(file_name)
            RAF -->>- BP : action_list, amount_list
        end
    end
    BP -->>- E : 
Loading

NaiveScanPolicy

sequenceDiagram
    participant E as entrypoint
    participant AS as ActionSpace
    participant MS as MotorSystem
    participant BP as BasePolicy
    participant RAF as read_action_file
    participant JTGM as JumpToGoalStateMixin
    participant IP as InformedPolicy
    participant NSP as NaiveScanPolicy
    participant DL as DataLoader

    E ->>+ NSP : __init__
    NSP ->>+ IP : __init__
    IP ->>+ BP : __init__
    BP ->>+ AS : sample
    AS -->>- BP : action
    BP ->> BP : get_random_action(action)
    activate BP
        loop
            alt if rand() < switch_frequency:
                BP ->>+ AS : sample
                AS -->>- BP : action
            end
            break if action != agent_id+".set_agent_pose" and action != agent_id+".set_sensor_rotation"
                BP -->> BP : action
            end
        end
    deactivate BP
    BP ->> BP : create_action_name_to_sample_fn
    BP ->> BP : get_next_amount(action)
    alt if file_names_per_episode is not None
        BP ->> BP : is_predefined = True
    end
    alt if file_name is not None
        BP ->>+ RAF : __call__(file_name)
        RAF -->>- BP : action_list, amount_list
        BP ->> BP : is_predefined = True
    end
    BP -->>- IP : 
    alt use_goal_state_driven_actions
        IP ->>+ JTGM : __init__
        JTGM -->>- IP : 
    end
    IP -->>- NSP : 
    NSP -->>- E : 

    create participant PO as processed_observations
    E ->> PO : 

    E ->>+ NSP : pre_episode
    NSP ->>+ IP : pre_episode
    alt use_goal_state_driven_actions
        IP ->>+ JTGM : pre_episode
        JTGM ->> JTGM : set_driving_goal_state(None)
        JTGM -->>- IP : 
    end
    IP ->>+ BP : pre_episode
    BP -->>- IP : 
    IP -->>- NSP : 
    NSP -->>- E : 

    E ->>+ MS : __call__
    alt is_predefined
        MS ->>+ BP : predefined_call
        BP -->>- MS: action, amount
    else
        MS ->>+ NSP : dynamic_call
        alt if steps_per_action * fixed_amount >= 90
            NSP -->> DL : StopIteration
        else
            NSP ->> NSP : check_cycle_action
            NSP -->> MS : action, amount
        end
        deactivate NSP
    end
    MS ->>+ IP : post_action(action, amount)
    IP -->>- MS : 
    MS -->>- E : action, amount

    E ->>+ BP : post_episode
    alt file_names_per_episode is not None
        alt episode_count in file_names_per_episode
            BP ->>+ RAF : __call__(file_name)
            RAF -->>- BP : action_list, amount_list
        end
    end
    BP -->>- E :    
Loading

OmniglotDataLoader + InformedPolicy

sequenceDiagram
    participant E as entrypoint
    participant AS as ActionSpace
    participant MS as MotorSystem
    participant BP as BasePolicy
    participant RAF as read_action_file
    participant JTGM as JumpToGoalStateMixin
    participant IP as InformedPolicy
    participant DL as OmniglotDataLoader

    E ->>+ IP : __init__
    IP ->>+ BP : __init__
    BP ->>+ AS : sample
    AS -->>- BP : action
    BP ->> BP : get_random_action(action)
    activate BP
        loop
            alt if rand() < switch_frequency:
                BP ->>+ AS : sample
                AS -->>- BP : action
            end
            break if action != agent_id+".set_agent_pose" and action != agent_id+".set_sensor_rotation"
                BP -->> BP : action
            end
        end
    deactivate BP
    BP ->> BP : create_action_name_to_sample_fn
    BP ->> BP : get_next_amount(action)
    alt if file_names_per_episode is not None
        BP ->> BP : is_predefined = True
    end
    alt if file_name is not None
        BP ->>+ RAF : __call__(file_name)
        RAF -->>- BP : action_list, amount_list
        BP ->> BP : is_predefined = True
    end
    BP -->>- IP : 
    alt use_goal_state_driven_actions
        IP ->>+ JTGM : __init__
        JTGM -->>- IP : 
    end
    IP -->>- E : 

    create participant PO as processed_observations
    E ->> PO : 

    E ->>+ DL : pre_episode
    DL ->>+ IP : pre_episode
    alt use_goal_state_driven_actions
        IP ->>+ JTGM : pre_episode
        JTGM ->> JTGM : set_driving_goal_state(None)
        JTGM -->>- IP : 
    end
    IP ->>+ BP : pre_episode
    BP -->>- IP : 
    IP -->>- DL : 
    DL ->> DL : reset_agent
    activate DL
        DL ->> BP : state=state
        DL ->> BP : state[agent_id]["motor_only_step"]=False
    deactivate DL
    DL -->>- E : 

    E ->>+ DL : __iter__
    DL ->> BP : state=state
    DL ->> BP : state[agent_id]["motor_only_step"]=False
    DL -->>- E : 

    E ->>+ DL : __next__
    DL ->>+ MS : __call__
    alt is_predefined
        MS ->>+ BP : predefined_call
        BP -->>- MS: action, amount
    else
        MS ->>+ IP : dynamic_call
        IP ->>+ BP : last_action
        BP -->>- IP : last_action, last_amount
        IP ->>+ PO : get_on_object
        PO -->>- IP : on_object?
        alt not on_object
            IP -->> MS : last_action, -last_amount
        else
            IP ->>+ BP : dynamic_call
            BP ->> BP : get_random_action(action)
            BP ->> BP : get_next_amount(action)
            BP -->>- IP : action, amount
            IP -->> MS: action, amount
        end
        deactivate IP
    end
    MS ->>+ IP : post_action(action, amount)
    IP -->>- MS : 
    MS -->>- DL : action, amount
    DL ->> BP : state=state
    DL -->>- E : i, observation

    E ->>+ DL : post_episode
    DL ->>+ BP : post_episode
    alt file_names_per_episode is not None
        alt episode_count in file_names_per_episode
            BP ->>+ RAF : __call__(file_name)
            RAF -->>- BP : action_list, amount_list
        end
    end
    BP -->>- DL : 
    DL -->>- E : 
Loading

SaccadeOnImageDataLoader + InformedPolicy

sequenceDiagram
    participant E as entrypoint
    participant AS as ActionSpace
    participant MS as MotorSystem
    participant BP as BasePolicy
    participant RAF as read_action_file
    participant JTGM as JumpToGoalStateMixin
    participant IP as InformedPolicy
    participant DL as SaccadeOnImageDataLoader

    E ->>+ IP : __init__
    IP ->>+ BP : __init__
    BP ->>+ AS : sample
    AS -->>- BP : action
    BP ->> BP : get_random_action(action)
    activate BP
        loop
            alt if rand() < switch_frequency:
                BP ->>+ AS : sample
                AS -->>- BP : action
            end
            break if action != agent_id+".set_agent_pose" and action != agent_id+".set_sensor_rotation"
                BP -->> BP : action
            end
        end
    deactivate BP
    BP ->> BP : create_action_name_to_sample_fn
    BP ->> BP : get_next_amount(action)
    alt if file_names_per_episode is not None
        BP ->> BP : is_predefined = True
    end
    alt if file_name is not None
        BP ->>+ RAF : __call__(file_name)
        RAF -->>- BP : action_list, amount_list
        BP ->> BP : is_predefined = True
    end
    BP -->>- IP : 
    alt use_goal_state_driven_actions
        IP ->>+ JTGM : __init__
        JTGM -->>- IP : 
    end
    IP -->>- E : 

    create participant PO as processed_observations
    E ->> PO : 

    E ->>+ DL : pre_episode
    DL ->>+ IP : pre_episode
    alt use_goal_state_driven_actions
        IP ->>+ JTGM : pre_episode
        JTGM ->> JTGM : set_driving_goal_state(None)
        JTGM -->>- IP : 
    end
    IP ->>+ BP : pre_episode
    BP -->>- IP : 
    IP -->>- DL : 
    DL ->> DL : reset_agent
    activate DL
        DL ->> BP : state=state
        DL ->> BP : state[agent_id]["motor_only_step"]=False
    deactivate DL
    DL -->>- E : 

    E ->>+ DL : __next__
    DL ->>+ MS : __call__
    alt is_predefined
        MS ->>+ BP : predefined_call
        BP -->>- MS: action, amount
    else
        MS ->>+ IP : dynamic_call
        IP ->>+ BP : last_action
        BP -->>- IP : last_action, last_amount
        IP ->>+ PO : get_on_object
        PO -->>- IP : on_object?
        alt not on_object
            IP -->> MS : last_action, -last_amount
        else
            IP ->>+ BP : dynamic_call
            BP ->> BP : get_random_action(action)
            BP ->> BP : get_next_amount(action)
            BP -->>- IP : action, amount
            IP -->> MS: action, amount
        end
        deactivate IP
    end
    MS ->>+ IP : post_action(action, amount)
    IP -->>- MS : 
    MS -->>- DL : action, amount
    DL ->> BP : state=state
    DL -->>- E : i, observation

    E ->>+ DL : post_episode
    DL ->>+ BP : post_episode
    alt file_names_per_episode is not None
        alt episode_count in file_names_per_episode
            BP ->>+ RAF : __call__(file_name)
            RAF -->>- BP : action_list, amount_list
        end
    end
    BP -->>- DL : 
    DL -->>- E : 
Loading

SaccadeOnImageFromStreamDataLoader + InformedPolicy

sequenceDiagram
    participant E as entrypoint
    participant AS as ActionSpace
    participant MS as MotorSystem
    participant BP as BasePolicy
    participant RAF as read_action_file
    participant JTGM as JumpToGoalStateMixin
    participant IP as InformedPolicy
    participant DL as SaccadeOnImageFromStreamDataLoader

    E ->>+ IP : __init__
    IP ->>+ BP : __init__
    BP ->>+ AS : sample
    AS -->>- BP : action
    BP ->> BP : get_random_action(action)
    activate BP
        loop
            alt if rand() < switch_frequency:
                BP ->>+ AS : sample
                AS -->>- BP : action
            end
            break if action != agent_id+".set_agent_pose" and action != agent_id+".set_sensor_rotation"
                BP -->> BP : action
            end
        end
    deactivate BP
    BP ->> BP : create_action_name_to_sample_fn
    BP ->> BP : get_next_amount(action)
    alt if file_names_per_episode is not None
        BP ->> BP : is_predefined = True
    end
    alt if file_name is not None
        BP ->>+ RAF : __call__(file_name)
        RAF -->>- BP : action_list, amount_list
        BP ->> BP : is_predefined = True
    end
    BP -->>- IP : 
    alt use_goal_state_driven_actions
        IP ->>+ JTGM : __init__
        JTGM -->>- IP : 
    end
    IP -->>- E : 

    create participant PO as processed_observations
    E ->> PO : 

    E ->>+ DL : pre_episode
    DL ->>+ IP : pre_episode
    alt use_goal_state_driven_actions
        IP ->>+ JTGM : pre_episode
        JTGM ->> JTGM : set_driving_goal_state(None)
        JTGM -->>- IP : 
    end
    IP ->>+ BP : pre_episode
    BP -->>- IP : 
    IP -->>- DL : 
    DL ->> DL : reset_agent
    activate DL
        DL ->> BP : state=state
        DL ->> BP : state[agent_id]["motor_only_step"]=False
    deactivate DL
    DL -->>- E : 

    E ->>+ DL : __next__
    DL ->>+ MS : __call__
    alt is_predefined
        MS ->>+ BP : predefined_call
        BP -->>- MS: action, amount
    else
        MS ->>+ IP : dynamic_call
        IP ->>+ BP : last_action
        BP -->>- IP : last_action, last_amount
        IP ->>+ PO : get_on_object
        PO -->>- IP : on_object?
        alt not on_object
            IP -->> MS : last_action, -last_amount
        else
            IP ->>+ BP : dynamic_call
            BP ->> BP : get_random_action(action)
            BP ->> BP : get_next_amount(action)
            BP -->>- IP : action, amount
            IP -->> MS: action, amount
        end
        deactivate IP
    end
    MS ->>+ IP : post_action(action, amount)
    IP -->>- MS : 
    MS -->>- DL : action, amount
    DL ->> BP : state=state
    DL -->>- E : i, observation

    E ->>+ DL : post_episode
    DL ->>+ BP : post_episode
    alt file_names_per_episode is not None
        alt episode_count in file_names_per_episode
            BP ->>+ RAF : __call__(file_name)
            RAF -->>- BP : action_list, amount_list
        end
    end
    BP -->>- DL : 
    DL -->>- E : 
Loading

SurfacePolicy

sequenceDiagram
    participant E as entrypoint
    participant AS as ActionSpace
    participant MS as MotorSystem
    participant BP as BasePolicy
    participant RAF as read_action_file
    participant JTGM as JumpToGoalStateMixin
    participant IP as InformedPolicy
    participant SP as SurfacePolicy

    E ->>+ SP : __init__
    SP ->>+ IP : __init__
    IP ->>+ BP : __init__
    BP ->>+ AS : sample
    AS -->>- BP : action
    BP ->> BP : get_random_action(action)
    activate BP
        loop
            alt if rand() < switch_frequency:
                BP ->>+ AS : sample
                AS -->>- BP : action
            end
            break if action != agent_id+".set_agent_pose" and action != agent_id+".set_sensor_rotation"
                BP -->> BP : action
            end
        end
    deactivate BP
    BP ->> BP : create_action_name_to_sample_fn
    BP ->>+ SP : get_next_amount(action)
    alt no processed_observations
        SP -->> SP : None
    end
    SP -->>- BP : 
    alt if file_names_per_episode is not None
        BP ->> BP : is_predefined = True
    end
    alt if file_name is not None
        BP ->>+ RAF : __call__(file_name)
        RAF -->>- BP : action_list, amount_list
        BP ->> BP : is_predefined = True
    end
    BP -->>- IP : 
    alt use_goal_state_driven_actions
        IP ->>+ JTGM : __init__
        JTGM -->>- IP : 
    end
    IP -->>- SP : 
    SP -->>- E : 

    create participant PO as processed_observations
    E ->> PO : 

    E ->>+ SP : pre_episode
    SP ->>+ IP : pre_episode
    alt use_goal_state_driven_actions
        IP ->>+ JTGM : pre_episode
        JTGM ->> JTGM : set_driving_goal_state(None)
        JTGM -->>- IP : 
    end
    IP ->>+ BP : pre_episode
    BP -->>- IP : 
    IP -->>- SP : 
    SP -->>- E : 

    E ->>+ MS : __call__
    alt is_predefined
        MS ->>+ BP : predefined_call
        BP -->>- MS: action, amount
    else
        MS ->>+ SP : dynamic_call
        SP ->>+ PO : get_feature_by_name("object_coverage")
        PO -->>- SP : object_coverage
        alt object_coverage < 0.1
            SP -->> MS : None, None
        else
            SP ->> SP : get_next_action
            activate SP
                SP ->>+ BP : last_action
                BP -->>- SP : last_action
                alt "move_forward" in last_action
                    SP -->> SP : "orient_horizontal"
                else "orient_horizontal" in last_action
                    SP -->> SP : "orient_vertical"
                else "orient_vertical" in last_action
                    SP -->> SP : "move_tangentially"
                else "move_tangentially" in last_action
                    SP ->>+ PO : get_on_object
                    PO -->>- SP : on_object?
                    alt if not on_object
                        SP -->> SP : "orient_horizontal"
                    else
                        SP -->> SP : "move_forward"
                    end
                end
            deactivate SP
            SP ->> SP : get_next_amount(action)
            activate SP
            alt no processed_observations
                SP -->> SP : None
            end
            alt "move_tangentially" in action
                SP ->>+ PO : get_feature_by_name("object_coverage")
                PO -->>- SP : object_coverage
                alt object_coverage < 0.2
                    SP ->>+ PO : get_feature_by_name("object_coverage")
                    PO -->>- SP : object_coverage
                    SP -->> SP : amount / (4 / object_coverage)
                else object_coverage < 0.75
                    SP -->> SP : amount / 4
                else 
                    SP -->> SP : amount
                end
            end
            deactivate SP
            SP -->> MS : action, amount
        end
        deactivate SP
    end
    MS ->>+ IP : post_action(action, amount)
    IP -->>- MS : 
    MS -->>- E : action, amount

    E ->>+ BP : post_episode
    alt file_names_per_episode is not None
        alt episode_count in file_names_per_episode
            BP ->>+ RAF : __call__(file_name)
            RAF -->>- BP : action_list, amount_list
        end
    end
    BP -->>- E :     
Loading

SurfacePolicyCurvatureInformed

sequenceDiagram
    participant E as entrypoint
    participant AS as ActionSpace
    participant MS as MotorSystem
    participant BP as BasePolicy
    participant RAF as read_action_file
    participant JTGM as JumpToGoalStateMixin
    participant IP as InformedPolicy
    participant SP as SurfacePolicy
    participant SPC as SurfacePolicyCurvatureInformed

    E ->>+ SPC : __init__
    SPC ->>+ SP : __init__
    SP ->>+ IP : __init__
    IP ->>+ BP : __init__
    BP ->>+ AS : sample
    AS -->>- BP : action
    BP ->> BP : get_random_action(action)
    activate BP
        loop
            alt if rand() < switch_frequency:
                BP ->>+ AS : sample
                AS -->>- BP : action
            end
            break if action != agent_id+".set_agent_pose" and action != agent_id+".set_sensor_rotation"
                BP -->> BP : action
            end
        end
    deactivate BP
    BP ->> BP : create_action_name_to_sample_fn
    BP ->>+ SP : get_next_amount(action)
    alt no processed_observations
        SP -->> SP : None
    end
    SP -->>- BP : 
    alt if file_names_per_episode is not None
        BP ->> BP : is_predefined = True
    end
    alt if file_name is not None
        BP ->>+ RAF : __call__(file_name)
        RAF -->>- BP : action_list, amount_list
        BP ->> BP : is_predefined = True
    end
    BP -->>- IP : 
    alt use_goal_state_driven_actions
        IP ->>+ JTGM : __init__
        JTGM -->>- IP : 
    end
    IP -->>- SP : 
    SP -->>- SPC : 
    SPC -->>- E : 

    create participant PO as processed_observations
    E ->> PO : 

    E ->>+ SPC : pre_episode
    SPC ->>+ SP : pre_episode
    SP ->>+ IP : pre_episode
    alt use_goal_state_driven_actions
        IP ->>+ JTGM : pre_episode
        JTGM ->> JTGM : set_driving_goal_state(None)
        JTGM -->>- IP : 
    end
    IP ->>+ BP : pre_episode
    BP -->>- IP : 
    IP -->>- SP : 
    SP -->>- SPC : 
    SPC -->>- E : 

    E ->>+ MS : __call__
    alt is_predefined
        MS ->>+ BP : predefined_call
        BP -->>- MS: action, amount
    else
        MS ->>+ SP : dynamic_call
        SP ->>+ PO : get_feature_by_name("object_coverage")
        PO -->>- SP : object_coverage
        alt object_coverage < 0.1
            SP -->> MS : None, None
        else
            SP ->> SP : get_next_action
            activate SP
                SP ->>+ BP : last_action
                BP -->>- SP : last_action
                alt "move_forward" in last_action
                    SP -->> SP : "orient_horizontal"
                else "orient_horizontal" in last_action
                    SP -->> SP : "orient_vertical"
                else "orient_vertical" in last_action
                    SP -->> SP : "move_tangentially"
                else "move_tangentially" in last_action
                    SP ->>+ PO : get_on_object
                    PO -->>- SP : on_object?
                    alt if not on_object
                        SP -->> SP : "orient_horizontal"
                    else
                        SP -->> SP : "move_forward"
                    end
                end
            deactivate SP
            SP ->> SP : get_next_amount(action)
            activate SP
            alt no processed_observations
                SP -->> SP : None
            end
            alt "move_tangentially" in action
                SP ->>+ PO : get_feature_by_name("object_coverage")
                PO -->>- SP : object_coverage
                alt object_coverage < 0.2
                    SP ->>+ PO : get_feature_by_name("object_coverage")
                    PO -->>- SP : object_coverage
                    SP -->> SP : amount / (4 / object_coverage)
                else object_coverage < 0.75
                    SP -->> SP : amount / 4
                else 
                    SP -->> SP : amount
                end
            end
            deactivate SP
            SP -->> MS : action, amount
        end
        deactivate SP
    end
    MS ->>+ IP : post_action(action, amount)
    IP -->>- MS : 
    MS -->>- E : action, amount

    E ->>+ BP : post_episode
    alt file_names_per_episode is not None
        alt episode_count in file_names_per_episode
            BP ->>+ RAF : __call__(file_name)
            RAF -->>- BP : action_list, amount_list
        end
    end
    BP -->>- E :        
Loading

TouchPolicy

sequenceDiagram
    participant E as entrypoint
    participant AS as ActionSpace
    participant MS as MotorSystem
    participant BP as BasePolicy
    participant RAF as read_action_file
    participant JTGM as JumpToGoalStateMixin
    participant IP as InformedPolicy
    participant TP as TouchPolicy

    E ->>+ TP : __init__
    TP ->>+ IP : __init__
    IP ->>+ BP : __init__
    BP ->>+ AS : sample
    AS -->>- BP : action
    BP ->> BP : get_random_action(action)
    activate BP
        loop
            alt if rand() < switch_frequency:
                BP ->>+ AS : sample
                AS -->>- BP : action
            end
            break if action != agent_id+".set_agent_pose" and action != agent_id+".set_sensor_rotation"
                BP -->> BP : action
            end
        end
    deactivate BP
    BP ->> BP : create_action_name_to_sample_fn
    BP ->>+ TP : get_next_amount(action)
    alt if no processed_observations
        TP -->> TP : None
    end
    TP -->>- BP : 
    alt if file_names_per_episode is not None
        BP ->> BP : is_predefined = True
    end
    alt if file_name is not None
        BP ->>+ RAF : __call__(file_name)
        RAF -->>- BP : action_list, amount_list
        BP ->> BP : is_predefined = True
    end
    BP -->>- IP : 
    alt use_goal_state_driven_actions
        IP ->>+ JTGM : __init__
        JTGM -->>- IP : 
    end
    IP -->>- TP : 
    TP -->>- E : 

    create participant PO as processed_observations
    E ->> PO : 

    E ->>+ TP : pre_episode
    TP ->>+ IP : pre_episode
    alt use_goal_state_driven_actions
        IP ->>+ JTGM : pre_episode
        JTGM ->> JTGM : set_driving_goal_state(None)
        JTGM -->>- IP : 
    end
    IP ->>+ BP : pre_episode
    BP -->>- IP : 
    IP -->>- TP : 
    TP -->>- E : 

    E ->>+ TP : __call__
    TP ->> TP : get_next_action
    activate TP
        TP ->>+ BP : last_action
        BP -->>- TP : last_action
        alt "move_forward" in last_action
            TP -->> TP : "orient_horizontal"
        else "orient_horizontal" in last_action
            TP -->> TP : "orient_vertical"
        else "orient_vertical" in last_action
            TP -->> TP : "move_tangentially"
        else "move_tangentially" in last_action
            TP ->>+ PO : get_on_object
            PO -->>- TP : on_object?
            alt if not on_object
                TP -->> TP : "orient_horizontal"
            else
                TP -->> TP : "move_forward"
            end
        end
    deactivate TP
    TP ->> TP : get_next_amount(action)
    activate TP
        alt if no processed_observations
            TP -->> TP : None
        else
            alt "orient" in action
                TP ->> TP : orienting_angle_from_normal(action)
                activate TP
                    TP ->>+ PO : point_normal
                    PO -->>- TP : 
                deactivate TP
            end
        end
    deactivate TP
    TP -->>- E : action, amount

    E ->>+ BP : post_episode
    alt file_names_per_episode is not None
        alt episode_count in file_names_per_episode
            BP ->>+ RAF : __call__(file_name)
            RAF -->>- BP : action_list, amount_list
        end
    end
    BP -->>- E : 
Loading
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment