Experiment rules

backend.experiment.rules.base#

BaseRules #

Bases: object

Base class for other rules classes

Source code in backend/experiment/rules/base.py
class BaseRules(object):
    """Base class for other rules classes"""

    contact_email = settings.CONTACT_MAIL
    counted_result_keys = []

    def feedback_info(self) -> FeedbackInfo:
        """
        Return info to be shown to the user if they are invited to give feedback
        """
        feedback_body = render_to_string("feedback/user_feedback.html", {"email": self.contact_email})
        return {
            # Header above the feedback form
            "header": _("Do you have any remarks or questions?"),
            # Button text
            "button": _("Submit"),
            # Body of the feedback form, can be HTML. Shown under the button
            "contact_body": feedback_body,
            # Thank you message after submitting feedback
            "thank_you": _("We appreciate your feedback!"),
            # Show a floating button on the right side of the screen to open the feedback form
            "show_float_button": False,
        }

    def calculate_score(
        self, result: Result, data: dict
    ) -> Optional[Union[int, float]]:
        """Use scoring rule to calculate score.

        The function uses the result's scoring rule if configured; otherwise, it returns `None`.

        Args:
            result: the Result object for which to calculate the score
            data: the data of the participant's response

        """
        scoring_rule = SCORING_RULES.get(result.scoring_rule)
        if scoring_rule:
            return scoring_rule(result, data)
        return None

    def get_play_again_url(self, session: Session) -> str:
        """Get the url to play the experiment again

        Args:
            session: current session
        """
        participant_id_url_param = (
            f"?participant_id={session.participant.participant_id_url}"
            if session.participant.participant_id_url
            else ""
        )
        return f"/block/{session.block.slug}{participant_id_url_param}"

    def get_experiment_url(self, session: Session) -> str:
        """
        Return the experiment URL. Defaults to `experiment.slug`.

        Args:
            session: current session
        """
        participant_id_url_param = (
            f"?participant_id={session.participant.participant_id_url}"
            if session.participant.participant_id_url
            else ""
        )
        return f"/{session.block.phase.experiment.slug}{participant_id_url_param}"

    def get_profile_question_trials(
        self, session: Session, n_questions: int = 1
    ) -> list[Trial]:
        """Get a list of trials for questions not yet answered by the user

        Args:
            session: the current session
            n_questions: the number of questions to return; set to `None` if all questions in the block's question lists should be returned at once

        Returns:
            list of `Trial` actions with unanswered questions
        """
        trials = []
        question_lists = session.block.questionlist_set.all()
        if n_questions is None:
            n_questions = sum(ql.questions.count() for ql in question_lists)
        for ql in question_lists:
            questions = ql.questions.order_by("?") if ql.randomize else ql.questions
            question_iterator = get_unanswered_questions(
                session.participant, questions.all()
            )
            while len(trials) < n_questions:
                try:
                    question_obj = next(question_iterator)
                    profile_result = prepare_profile_result(
                        question_obj.key, session.participant
                    )
                    question = question_obj.convert_to_action()
                    question.result_id = profile_result.id
                    trials.append(
                        Trial(
                            title=_("Questionnaire"),
                            feedback_form=Form(
                                [question], is_skippable=question_obj.is_skippable
                            ),
                        )
                    )
                except StopIteration:
                    break
        if len(trials) > 1:
            for index, trial in enumerate(trials):
                trial.title = _("Questionnaire %(index)i / %(total)i") % {
                    "index": index + 1,
                    "total": len(trials),
                }
        return trials

    def has_played_before(self, session: Session) -> bool:
        """Check if the current participant has completed this game previously.

        Args:
            session: current session

        Returns:
            boolean indicating whether the current participant has finished a session of this game
        """
        previous_games = Session.objects.filter(
            participant=session.participant,
            block=session.block,
            finished_at__isnull=False,
        )
        return previous_games.exists()

    def calculate_intermediate_score(self, session: Session, result: Result) -> int:
        """process result data during a trial (i.e., between next_round calls). This is only used in the matching_pairs rules files so far.
        Override this in your rules file to control what value should be returned when frontend calls `session/intermediate_score` endpoint.

        Args:
            session: current session
            result: result to be evaluated

        Returns:
            the score of the result
        """
        return 0

    def final_score_message(self, session: Session) -> str:
        """Create final score message for given session, base on score per result
        Override this to display different text on the final screen.

        Args:
            session: the current session

        Returns:
            a string with feedback for the participant based on their score
        """

        correct = 0
        total = 0

        for result in session.result_set.all():
            # if a result has score != 0, it was recognized
            if result.score:
                total += 1

                if result.score > 0:
                    # if a result has score > 0, it was identified correctly
                    correct += 1

        score_message = "Well done!" if session.final_score > 0 else "Too bad!"
        message = "You correctly identified {} out of {} recognized songs!".format(correct, total)
        return score_message + " " + message

    def rank(self, session: Session, exclude_unfinished: bool = True) -> str:
        """Get rank based on session score, based on the participant's percentile rank
        Override this function in your rules file to change rank calculation

        Args:
            session: the current session
            exclude_unfinished: whether unfinished sessions should be excluded when calculating rank

        Returns:
            a string indicating the rank of the participant (e.g., "bronze")
        """
        score = session.final_score
        ranks = Final.RANKS

        # No score, or a zero or negative score: always return the lowest rank (plastic)
        if not score or score <= 0:
            return ranks["PLASTIC"]

        # Buckets for positive scores:
        # rank: starting percentile
        buckets = [
            # ~ stanines 1-3
            {"rank": ranks["BRONZE"], "min_percentile": 0.0},
            # ~ stanines 4-6
            {"rank": ranks["SILVER"], "min_percentile": 25.0},
            # ~ stanine 7
            {"rank": ranks["GOLD"], "min_percentile": 75.0},
            {"rank": ranks["PLATINUM"], "min_percentile": 90.0},  # ~ stanine 8
            {"rank": ranks["DIAMOND"], "min_percentile": 95.0},  # ~ stanine 9
        ]
        filter_condition = {"finished_at__isnull": False} if exclude_unfinished else {}
        percentile = session.percentile_rank(filter_condition)

        # Check the buckets in reverse order;
        # if the percentile rank is at or above a bucket's min_percentile,
        # return that bucket's rank
        for bucket in reversed(buckets):
            if percentile >= bucket["min_percentile"]:
                return bucket["rank"]

        # Fallback, in case the percentile doesn't fall into any bucket
        return ranks["PLASTIC"]

    def validate_playlist(self, playlist: Playlist = None) -> list[str]:
        """Validate a playlist associated with this rules file, e.g., ensure that files have a specific name format

        Args:
            playlist: playlist to be checked

        Returns:
            a list of error messages; an empty list means validation succeeded.
        """
        errors = []
        # Common validations across blocks
        if not playlist:
            errors.append("The block must have a playlist.")
            return errors

        sections = playlist.section_set.all()

        if not sections:
            errors.append("The block must have at least one section.")

        try:
            playlist.clean_csv()
        except ValidationError as e:
            errors += e.error_list

        return errors

calculate_intermediate_score(session, result) #

Process result data during a trial (i.e., between next_round calls). So far, this is only used in the matching_pairs rules files. Override this in your rules file to control what value should be returned when the frontend calls the session/intermediate_score endpoint.

Parameters:

- session (Session, required): current session
- result (Result, required): result to be evaluated

Returns:

- int: the score of the result

Source code in backend/experiment/rules/base.py
def calculate_intermediate_score(self, session: Session, result: Result) -> int:
    """process result data during a trial (i.e., between next_round calls). This is only used in the matching_pairs rules files so far.
    Override this in your rules file to control what value should be returned when frontend calls `session/intermediate_score` endpoint.

    Args:
        session: current session
        result: result to be evaluated

    Returns:
        the score of the result
    """
    return 0
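
As an example, here is a minimal override sketch, assuming the frontend posts its running score in the result's json_data payload (the "score" key and the rules class name are hypothetical):

class MyMatchingGameRules(BaseRules):
    def calculate_intermediate_score(self, session: Session, result: Result) -> int:
        # Hypothetical: trust a numeric "score" value reported by the frontend;
        # fall back to 0 when the key is missing or malformed.
        data = result.json_data or {}
        try:
            return int(data.get("score", 0))
        except (TypeError, ValueError):
            return 0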

calculate_score(result, data) #

Use scoring rule to calculate score.

The function uses the result's scoring rule if configured; otherwise, it returns None.

Parameters:

- result (Result, required): the Result object for which to calculate the score
- data (dict, required): the data of the participant's response

Source code in backend/experiment/rules/base.py
def calculate_score(
    self, result: Result, data: dict
) -> Optional[Union[int, float]]:
    """Use scoring rule to calculate score.

    The function uses the result's scoring rule if configured; otherwise, it returns `None`.

    Args:
        result: the Result object for which to calculate the score
        data: the data of the participant's response

    """
    scoring_rule = SCORING_RULES.get(result.scoring_rule)
    if scoring_rule:
        return scoring_rule(result, data)
    return None
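
The scoring rule is typically attached to a result when it is prepared; calculate_score then looks the rule up by name in SCORING_RULES. A sketch using the prepare_result pattern that also appears in the practice rules below (the result key and expected response are hypothetical):

result_id = prepare_result(
    "pitch_comparison",  # hypothetical result key
    session,
    section=section,
    expected_response="higher",  # hypothetical expected response
    scoring_rule="CORRECTNESS",  # the name that calculate_score looks up
)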

feedback_info() #

Return info to be shown to the user if they are invited to give feedback

Source code in backend/experiment/rules/base.py
def feedback_info(self) -> FeedbackInfo:
    """
    Return info to be shown to the user if they are invited to give feedback
    """
    feedback_body = render_to_string("feedback/user_feedback.html", {"email": self.contact_email})
    return {
        # Header above the feedback form
        "header": _("Do you have any remarks or questions?"),
        # Button text
        "button": _("Submit"),
        # Body of the feedback form, can be HTML. Shown under the button
        "contact_body": feedback_body,
        # Thank you message after submitting feedback
        "thank_you": _("We appreciate your feedback!"),
        # Show a floating button on the right side of the screen to open the feedback form
        "show_float_button": False,
    }
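
Since the method returns a plain dictionary, an override can start from the defaults and adjust individual keys. A sketch with hypothetical wording:

class MyGameRules(BaseRules):
    def feedback_info(self) -> FeedbackInfo:
        info = super().feedback_info()
        info["header"] = _("Tell us about your experience!")  # hypothetical text
        info["show_float_button"] = True  # enable the floating feedback button
        return info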

final_score_message(session) #

Create a final score message for the given session, based on the score per result. Override this to display different text on the final screen.

Parameters:

- session (Session, required): the current session

Returns:

- str: a string with feedback for the participant based on their score

Source code in backend/experiment/rules/base.py
def final_score_message(self, session: Session) -> str:
    """Create final score message for given session, base on score per result
    Override this to display different text on the final screen.

    Args:
        session: the current session

    Returns:
        a string with feedback for the participant based on their score
    """

    correct = 0
    total = 0

    for result in session.result_set.all():
        # if a result has score != 0, it was recognized
        if result.score:
            total += 1

            if result.score > 0:
                # if a result has score > 0, it was identified correctly
                correct += 1

    score_message = "Well done!" if session.final_score > 0 else "Too bad!"
    message = "You correctly identified {} out of {} recognized songs!".format(correct, total)
    return score_message + " " + message
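
An override sketch that reports the raw final score instead of the recognized-songs tally (the message wording is hypothetical):

class MyGameRules(BaseRules):
    def final_score_message(self, session: Session) -> str:
        # session.final_score holds the accumulated score for this session
        return _("You finished with %(score)d points!") % {
            "score": session.final_score
        }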

get_experiment_url(session) #

Return the experiment URL. Defaults to experiment.slug.

Parameters:

- session (Session, required): current session

Source code in backend/experiment/rules/base.py
def get_experiment_url(self, session: Session) -> str:
    """
    Return the experiment URL. Defaults to `experiment.slug`.

    Args:
        session: current session
    """
    participant_id_url_param = (
        f"?participant_id={session.participant.participant_id_url}"
        if session.participant.participant_id_url
        else ""
    )
    return f"/{session.block.phase.experiment.slug}{participant_id_url_param}"

get_play_again_url(session) #

Get the URL to play the experiment again

Parameters:

- session (Session, required): current session

Source code in backend/experiment/rules/base.py
def get_play_again_url(self, session: Session) -> str:
    """Get the url to play the experiment again

    Args:
        session: current session
    """
    participant_id_url_param = (
        f"?participant_id={session.participant.participant_id_url}"
        if session.participant.participant_id_url
        else ""
    )
    return f"/block/{session.block.slug}{participant_id_url_param}"

get_profile_question_trials(session, n_questions=1) #

Get a list of trials for questions not yet answered by the user

Parameters:

- session (Session, required): the current session
- n_questions (int, default 1): the number of questions to return; set to None if all questions in the block's question lists should be returned at once

Returns:

- list[Trial]: list of Trial actions with unanswered questions

Source code in backend/experiment/rules/base.py
def get_profile_question_trials(
    self, session: Session, n_questions: int = 1
) -> list[Trial]:
    """Get a list of trials for questions not yet answered by the user

    Args:
        session: the current session
        n_questions: the number of questions to return; set to `None` if all questions in the block's question lists should be returned at once

    Returns:
        list of `Trial` actions with unanswered questions
    """
    trials = []
    question_lists = session.block.questionlist_set.all()
    if n_questions is None:
        n_questions = sum(ql.questions.count() for ql in question_lists)
    for ql in question_lists:
        questions = ql.questions.order_by("?") if ql.randomize else ql.questions
        question_iterator = get_unanswered_questions(
            session.participant, questions.all()
        )
        while len(trials) < n_questions:
            try:
                question_obj = next(question_iterator)
                profile_result = prepare_profile_result(
                    question_obj.key, session.participant
                )
                question = question_obj.convert_to_action()
                question.result_id = profile_result.id
                trials.append(
                    Trial(
                        title=_("Questionnaire"),
                        feedback_form=Form(
                            [question], is_skippable=question_obj.is_skippable
                        ),
                    )
                )
            except StopIteration:
                break
    if len(trials) > 1:
        for index, trial in enumerate(trials):
            trial.title = _("Questionnaire %(index)i / %(total)i") % {
                "index": index + 1,
                "total": len(trials),
            }
    return trials
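
A sketch of how a next_round implementation might interleave profile questions with the experiment proper (the get_next_trial helper is assumed here, as in PracticeMixin below):

def next_round(self, session):
    # Present one unanswered profile question per round, if any remain.
    profile_trials = self.get_profile_question_trials(session, n_questions=1)
    if profile_trials:
        return profile_trials
    return [self.get_next_trial(session)]  # continue with the experiment proper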

has_played_before(session) #

Check if the current participant has completed this game previously.

Parameters:

- session (Session, required): current session

Returns:

- bool: boolean indicating whether the current participant has finished a session of this game

Source code in backend/experiment/rules/base.py
def has_played_before(self, session: Session) -> bool:
    """Check if the current participant has completed this game previously.

    Args:
        session: current session

    Returns:
        boolean indicating whether the current participant has finished a session of this game
    """
    previous_games = Session.objects.filter(
        participant=session.participant,
        block=session.block,
        finished_at__isnull=False,
    )
    return previous_games.exists()
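
A usage sketch, e.g. to greet returning participants with a different explainer; the explainer and trial helpers are hypothetical:

def next_round(self, session):
    if session.get_rounds_passed() == 0:
        explainer = (
            self.get_welcome_back_explainer()  # hypothetical helper
            if self.has_played_before(session)
            else self.get_intro_explainer()
        )
        return [explainer, self.get_next_trial(session)]
    return [self.get_next_trial(session)]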

rank(session, exclude_unfinished=True) #

Get the rank for this session, based on the participant's percentile rank. Override this function in your rules file to change rank calculation.

Parameters:

- session (Session, required): the current session
- exclude_unfinished (bool, default True): whether unfinished sessions should be excluded when calculating rank

Returns:

- str: a string indicating the rank of the participant (e.g., "bronze")

Source code in backend/experiment/rules/base.py
def rank(self, session: Session, exclude_unfinished: bool = True) -> str:
    """Get rank based on session score, based on the participant's percentile rank
    Override this function in your rules file to change rank calculation

    Args:
        session: the current session
        exclude_unfinished: whether unfinished sessions should be excluded when calculating rank

    Returns:
        a string indicating the rank of the participant (e.g., "bronze")
    """
    score = session.final_score
    ranks = Final.RANKS

    # No score, or a zero or negative score: always return the lowest rank (plastic)
    if not score or score <= 0:
        return ranks["PLASTIC"]

    # Buckets for positive scores:
    # rank: starting percentile
    buckets = [
        # ~ stanines 1-3
        {"rank": ranks["BRONZE"], "min_percentile": 0.0},
        # ~ stanines 4-6
        {"rank": ranks["SILVER"], "min_percentile": 25.0},
        # ~ stanine 7
        {"rank": ranks["GOLD"], "min_percentile": 75.0},
        {"rank": ranks["PLATINUM"], "min_percentile": 90.0},  # ~ stanine 8
        {"rank": ranks["DIAMOND"], "min_percentile": 95.0},  # ~ stanine 9
    ]
    filter_condition = {"finished_at__isnull": False} if exclude_unfinished else {}
    percentile = session.percentile_rank(filter_condition)

    # Check the buckets in reverse order;
    # if the percentile rank is at or above a bucket's min_percentile,
    # return that bucket's rank
    for bucket in reversed(buckets):
        if percentile >= bucket["min_percentile"]:
            return bucket["rank"]

    # Fallback, in case the percentile doesn't fall into any bucket
    return ranks["PLASTIC"]

validate_playlist(playlist=None) #

Validate a playlist associated with this rules file, e.g., ensure that files have a specific name format

Parameters:

- playlist (Playlist, default None): playlist to be checked

Returns:

- list[str]: a list of error messages; an empty list means validation succeeded

Source code in backend/experiment/rules/base.py
def validate_playlist(self, playlist: Playlist = None) -> list[str]:
    """Validate a playlist associated with this rules file, e.g., ensure that files have a specific name format

    Args:
        playlist: playlist to be checked

    Returns:
        a list of error messages; an empty list means validation succeeded.
    """
    errors = []
    # Common validations across blocks
    if not playlist:
        errors.append("The block must have a playlist.")
        return errors

    sections = playlist.section_set.all()

    if not sections:
        errors.append("The block must have at least one section.")

    try:
        playlist.clean_csv()
    except ValidationError as e:
        errors += e.error_list

    return errors
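
An override sketch that layers a block-specific check on top of the common validations; the requirement that every section carries a tag is hypothetical:

class MyGameRules(BaseRules):
    def validate_playlist(self, playlist: Playlist = None) -> list[str]:
        errors = super().validate_playlist(playlist)
        if playlist:
            for section in playlist.section_set.all():
                # Hypothetical format requirement for this block's sections.
                if not section.tag:
                    errors.append(f"Section {section.id} is missing a tag.")
        return errors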

backend.experiment.rules.practice#

PracticeMixin #

Bases: object

PracticeMixin can be used to present a trial a given number of times. After these practice trials, it tests whether the participant performed well enough to proceed.

Extend this class in your ruleset if you need a practice run for your participants.

Note that you could use this class to

- create rules for a self-contained block with only the practice run, and define the experiment proper in another rules file;
- create rules which include the experiment proper after the practice phase.

This practice class is currently written for two-alternative forced choice (2AFC) rulesets, but may be extended in the future.

Attributes:

- task_description (str): will appear in the title of the experiment
- first_condition (str): the first condition that trials may have (e.g., lower pitch)
- first_condition_i18n (str): how the condition will appear to participants; can be translated if you wrap the string in _()
- second_condition (str): the second condition that trials may have (e.g., higher pitch)
- second_condition_i18n (str): how the condition will appear to participants; can be translated if you wrap the string in _()
- n_practice_rounds (int): the number of practice rounds that should be presented
- n_practice_rounds_second_condition (int): how often the second condition appears in the practice rounds, e.g., one "catch" trial, or half the practice trials
- n_correct (int): how many of the participant's answers need to be correct to proceed

Example

This is an example of a rules file which would only present the practice run to the participant:

class MyPracticeRun(BaseRules, PracticeMixin):
    task_description = ""
    first_condition = 'lower'
    first_condition_i18n = _("LOWER")
    second_condition = 'higher'
    second_condition_i18n = _("HIGHER")
    n_practice_rounds = 10
    n_practice_rounds_second_condition = 5
    n_correct = 3

    def next_round(self, session):
        return self.next_practice_round(session)
For a full-blown example, refer to the duration_discrimination.py rules file. This implements the experiment proper after the practice run.

Source code in backend/experiment/rules/practice.py
class PracticeMixin(object):
    """PracticeMixin can be used to present a trial a given number of times.
    After these practice trials, it tests whether the participant performed well enough to proceed.

    Extend this class in your ruleset if you need a practice run for your participants.

    Note that you could use this class to
        - create rules for a self-contained block with only the practice run, and define the experiment proper in another rules file;
        - create rules which include the experiment proper after the practice phase.

    This practice class is currently written for two-alternative forced choice (2AFC) rulesets, but may be extended in the future.

    Attributes:
        task_description (str): will appear in the title of the experiment
        first_condition (str): the first condition that trials may have (e.g., lower pitch)
        first_condition_i18n (str): the way the condition will appear to participants, can be translated if you use _() around the string
        second_condition (str): the second condition that trials may have (e.g., higher pitch)
        second_condition_i18n (str): the way the condition will appear to participants, can be translated if you use _() around the string
        n_practice_rounds (int): adjust to the number of practice rounds that should be presented
        n_practice_rounds_second_condition (int): how often the second condition appears in the practice rounds, e.g., one "catch" trial, or half the practice trials
        n_correct (int): how many answers of the participant need to be correct to proceed


    Example:
        This is an example of a rules file which would only present the practice run to the participant:
        ```python
        class MyPracticeRun(BaseRules, PracticeMixin):
            task_description = ""
            first_condition = 'lower'
            first_condition_i18n = _("LOWER")
            second_condition = 'higher'
            second_condition_i18n = _("HIGHER")
            n_practice_rounds = 10
            n_practice_rounds_second_condition = 5
            n_correct = 3

            def next_round(self, session):
                return self.next_practice_round(session)
        ```
        For a full-blown example, refer to the `duration_discrimination.py` rules file. This implements the experiment proper after the practice run.
    """

    task_description = "Pitch discrimination"
    first_condition = 'lower'
    first_condition_i18n = _("LOWER")
    second_condition = 'higher'
    second_condition_i18n = _("HIGHER")
    n_practice_rounds = 4
    n_practice_rounds_second_condition = 1  # how many trials have second condition
    n_correct = 1 # how many trials need to be answered correctly to proceed

    def next_practice_round(self, session: Session) -> list[Union[Trial, Explainer]]:
        """This method implements the logic for presenting explainers, practice rounds,
        and checking after the practice rounds if the participant was successful.

        - if so: proceed to the next stage of the experiment. `session.json_data` will contain `{'practice_done': True}`, which you can check for in your `next_round` logic.

        - if not: delete all results so far, and restart the practice.

        You can call this method from your ruleset's `next_round` function.

        Arguments:
            session: the Session object, as also supplied to `next_round`

        Returns:
            list of Trial and/or Explainer objects
        """
        round_number = session.get_rounds_passed()
        if round_number == 0:
            return [
                self.get_intro_explainer(),
                self.get_practice_explainer(),
                self.get_next_trial(session),
            ]
        if round_number % self.n_practice_rounds == 0:
            if self.practice_successful(session):
                self.finalize_practice(session)
                return [
                    self.get_feedback_explainer(session),
                    self.get_continuation_explainer(),
                ]
            else:
                # generate feedback, then delete all results so far and start over
                feedback = self.get_feedback_explainer(session)
                session.result_set.all().delete()
                return [
                    feedback,
                    self.get_restart_explainer(),
                    self.get_intro_explainer(),
                    self.get_practice_explainer(),
                    self.get_next_trial(session),
                ]
        else:
            return [
                self.get_feedback_explainer(session),
                self.get_next_trial(session),
            ]

    def finalize_practice(self, session: Session):
        """Finalize practice: set `{"practice_done": True}` in `session.json_data`

        Arguments:
            session: the Session object, as supplied to the `next_round` method
        """
        session.save_json_data({"practice_done": True})

    def get_intro_explainer(self) -> Explainer:
        """Override this method to explain the procedure of the current block to your participants.

        Returns:
            Explainer object
        """
        return Explainer(
            instruction=_("In this test you will hear two tones"),
            steps=[
                Step(
                    _(
                        "It's your job to decide if the second tone is %(first_condition)s or %(second_condition)s  than the second tone"
                    )
                    % {
                        "first_condition": self.first_condition_i18n,
                        "second_condition": self.second_condition_i18n,
                    }
                ),
                Step(
                    _(
                        "During the experiment it will become more difficult to hear the difference."
                    )
                ),
                Step(
                    _(
                        "Try to answer as accurately as possible, even if you're uncertain."
                    )
                ),
                Step(
                    _(
                        "This test will take around 4 minutes to complete. Try to stay focused for the entire test!"
                    )
                ),
            ],
            button_label="Ok",
            step_numbers=True,
        )

    def get_practice_explainer(self) -> Explainer:
        """Override this method if you want to give extra information about the practice itself.

        Returns:
            Explainer object
        """
        return Explainer(
            instruction=_("We will now practice first."),
            steps=[
                Step(
                    description=_(
                        "First you will hear %(n_practice_rounds)d practice trials."
                    )
                    % {"n_practice_rounds": self.n_practice_rounds}
                ),
            ],
            button_label=_("Begin experiment"),
        )

    def get_restart_explainer(self) -> Explainer:
        """Override this method if you want to adjust the feedback to why participants need to practice again.

        Returns:
            Explainer object
        """
        return Explainer(
            instruction=_(
                "You have answered %(n_correct)d or more practice trials incorrectly."
            )
            % {"n_correct": self.n_correct},
            steps=[
                Step(_("We will therefore practice again.")),
                Step(_("But first, you can read the instructions again.")),
            ],
            button_label=_("Continue"),
        )

    def get_continuation_explainer(self) -> Explainer:
        """Override this explainer if you want to give extra information to the participant before the actual test phase starts.
        Returns:
            Explainer object
        """
        return Explainer(
            instruction=_(
                'Now we will start the real experiment.'),
            steps=[
                Step(_('Pay attention! During the experiment it will become more difficult to hear the difference between the tones.')),
                Step(_(
                        "Try to answer as accurately as possible, even if you're uncertain.")),
                Step(_(
                        "Remember that you don't move along or tap during the test.")),
            ],
            step_numbers=True,
            button_label=_('Start')
        )

    def get_feedback_explainer(self, session: Session) -> Explainer:
        """Override this explainer if you need to give different feedback to participants about whether or not they answered correctly.

        Returns:
            Explainer object
        """
        correct_response, is_correct = self.get_condition_and_correctness(session)
        if is_correct:
            instruction = _(
                "The second tone was %(correct_response)s than the first tone. Your answer was CORRECT."
            ) % {"correct_response": correct_response}
        else:
            instruction = _(
                "The second tone was %(correct_response)s than the first tone. Your answer was INCORRECT."
            ) % {"correct_response": correct_response}
        return Explainer(
            instruction=instruction,
            steps=[],
            button_label=_('Ok')
        )

    def get_condition_and_correctness(self, session: Session) -> Tuple[str, bool]:
        """Checks whether the condition of the last Trial, and whether the response of the participant was correct.
        This method is called from `get_feedback_explainer`.

        Args:
            session: Session object, as supplied to the `next_round` method

        Returns:
            a tuple of the last trial's condition, and whether it was answered correctly
        """
        last_result = session.last_result()
        correct_response = (
            self.first_condition_i18n
            if last_result.expected_response == self.first_condition
            else self.second_condition_i18n
        )
        return (
            correct_response,
            last_result.expected_response == last_result.given_response,
        )

    def get_condition(self, session: Session) -> str:
        """Keep track of the conditions presented in the practice phase through the `session.json_data`.
        In the default implementation, it will generate `n_practice_rounds` conditions, with `n_practice_rounds_second_condition` times the second condition,
        and `n_practice_rounds - n_practice_rounds_second_condition` times the first condition, shuffle these randomly,
        and then present one condition each round.

        Override this method if you need a different setup.

        Arguments:
            session: the Session object, as supplied to the `next_round` method
        """
        conditions = session.json_data.get("conditions")
        if not conditions:
            # the second condition should appear n_practice_rounds_second_condition times
            conditions = [
                self.second_condition
            ] * self.n_practice_rounds_second_condition + [self.first_condition] * (
                self.n_practice_rounds - self.n_practice_rounds_second_condition
            )
            while conditions[-2] == conditions[-1]:
                # we want the conditions shuffled so that we don't get the same condition twice right away
                random.shuffle(conditions)
            session.save_json_data({'conditions': conditions})
        condition = conditions.pop()
        session.save_json_data({'conditions': conditions})
        session.save()
        return condition

    def get_next_trial(self, session: Session) -> Trial:
        """
        Provide the next trial action

        Args:
            session: the Session object, as supplied to the `next_round` function

        Returns:
            Trial object
        """
        round_number = session.get_rounds_passed()
        condition = self.get_condition(session)
        try:
            section = session.playlist.get_section(
                {"group": "practice", "tag": condition}
            )
        except Section.DoesNotExist:
            raise
        expected_response = condition
        total_rounds = self.n_practice_rounds * math.ceil(
            round_number / self.n_practice_rounds
        )
        key = self.task_description.replace(" ", "_") + "_practice"
        question = ButtonArrayQuestion(
            text=_(
                "Is the second tone %(first_condition)s or %(second_condition)s than the first tone?"
            )
            % {
                "first_condition": self.first_condition_i18n,
                "second_condition": self.second_condition_i18n,
            },
            key=key,
            choices={
                self.first_condition: self.first_condition_i18n,
                self.second_condition: self.second_condition_i18n,
            },
            result_id=prepare_result(
                key,
                session,
                section=section,
                expected_response=expected_response,
                scoring_rule="CORRECTNESS",
            ),
        )
        playback = Autoplay([section])
        form = Form([question], submit_label="")
        return Trial(
            playback=playback,
            feedback_form=form,
            title=_(
                "%(task_description)s: Practice round %(round_number)d of %(total_rounds)d"
                % {
                    "task_description": self.task_description,
                    "round_number": round_number + 1,
                    "total_rounds": total_rounds,
                }
            ),
            config={"listen_first": True, "response_time": section.duration + 0.1},
        )

    def practice_successful(self, session: Session) -> bool:
        """Checks if the practice is correct, i.e., that at the participant gave at least `n_correct` correct responses.

        Override this method if you need different logic.

        Arguments:
            session: the Session object, as supplied to the `next_round` method

        Returns:
            a boolean indicating whether or not the practice was successful
        """
        results = session.last_n_results(n_results=self.n_practice_rounds)
        correct = sum(result.score for result in results)
        return correct >= self.n_correct

finalize_practice(session) #

Finalize practice: set {"practice_done": True} in session.json_data

Parameters:

- session (Session, required): the Session object, as supplied to the next_round method

Source code in backend/experiment/rules/practice.py
def finalize_practice(self, session: Session):
    """Finalize practice: set `{"practice_done": True}` in `session.json_data`

    Arguments:
        session: the Session object, as supplied to the `next_round` method
    """
    session.save_json_data({"practice_done": True})

get_condition(session) #

Keep track of the conditions presented in the practice phase through the session.json_data. In the default implementation, it will generate n_practice_rounds conditions, with n_practice_rounds_second_condition times the second condition, and n_practice_rounds - n_practice_rounds_second_condition times the first condition, shuffle these randomly, and then present one condition each round.

Override this method if you need a different setup.

Parameters:

- session (Session, required): the Session object, as supplied to the next_round method

Source code in backend/experiment/rules/practice.py
def get_condition(self, session: Session) -> str:
    """Keep track of the conditions presented in the practice phase through the `session.json_data`.
    In the default implementation, it will generate `n_practice_rounds` conditions, with `n_practice_rounds_second_condition` times the second condition,
    and `n_practice_rounds - n_practice_rounds_second_condition` times the first condition, shuffle these randomly,
    and then present one condition each round.

    Override this method if you need a different setup.

    Arguments:
        session: the Session object, as supplied to the `next_round` method
    """
    conditions = session.json_data.get("conditions")
    if not conditions:
        # the second condition should appear n_practice_rounds_second_condition times
        conditions = [
            self.second_condition
        ] * self.n_practice_rounds_second_condition + [self.first_condition] * (
            self.n_practice_rounds - self.n_practice_rounds_second_condition
        )
        while conditions[-2] == conditions[-1]:
            # we want the conditions shuffled so that we don't get the same condition twice right away
            random.shuffle(conditions)
        session.save_json_data({'conditions': conditions})
    condition = conditions.pop()
    session.save_json_data({'conditions': conditions})
    session.save()
    return condition

get_condition_and_correctness(session) #

Checks the condition of the last Trial, and whether the response of the participant was correct. This method is called from get_feedback_explainer.

Parameters:

- session (Session, required): Session object, as supplied to the next_round method

Returns:

- Tuple[str, bool]: a tuple of the last trial's condition, and whether it was answered correctly

Source code in backend/experiment/rules/practice.py
def get_condition_and_correctness(self, session: Session) -> Tuple[str, bool]:
    """Checks whether the condition of the last Trial, and whether the response of the participant was correct.
    This method is called from `get_feedback_explainer`.

    Args:
        session: Session object, as supplied to the `next_round` method

    Returns:
        a tuple of the last trial's condition, and whether it was answered correctly
    """
    last_result = session.last_result()
    correct_response = (
        self.first_condition_i18n
        if last_result.expected_response == self.first_condition
        else self.second_condition_i18n
    )
    return (
        correct_response,
        last_result.expected_response == last_result.given_response,
    )

get_continuation_explainer() #

Override this explainer if you want to give extra information to the participant before the actual test phase starts.

Returns:

- Explainer: Explainer object

Source code in backend/experiment/rules/practice.py
def get_continuation_explainer(self) -> Explainer:
    """Override this explainer if you want to give extra information to the participant before the actual test phase starts.
    Returns:
        Explainer object
    """
    return Explainer(
        instruction=_(
            'Now we will start the real experiment.'),
        steps=[
            Step(_('Pay attention! During the experiment it will become more difficult to hear the difference between the tones.')),
            Step(_(
                    "Try to answer as accurately as possible, even if you're uncertain.")),
            Step(_(
                    "Remember that you don't move along or tap during the test.")),
        ],
        step_numbers=True,
        button_label=_('Start')
    )

get_feedback_explainer(session) #

Override this explainer if you need to give different feedback to participants about whether or not they answered correctly.

Returns:

- Explainer: Explainer object

Source code in backend/experiment/rules/practice.py
def get_feedback_explainer(self, session: Session) -> Explainer:
    """Override this explainer if you need to give different feedback to participants about whether or not they answered correctly.

    Returns:
        Explainer object
    """
    correct_response, is_correct = self.get_condition_and_correctness(session)
    if is_correct:
        instruction = _(
            "The second tone was %(correct_response)s than the first tone. Your answer was CORRECT."
        ) % {"correct_response": correct_response}
    else:
        instruction = _(
            "The second tone was %(correct_response)s than the first tone. Your answer was INCORRECT."
        ) % {"correct_response": correct_response}
    return Explainer(
        instruction=instruction,
        steps=[],
        button_label=_('Ok')
    )

get_intro_explainer() #

Override this method to explain the procedure of the current block to your participants.

Returns:

- Explainer: Explainer object

Source code in backend/experiment/rules/practice.py
def get_intro_explainer(self) -> Explainer:
    """Override this method to explain the procedure of the current block to your participants.

    Returns:
        Explainer object
    """
    return Explainer(
        instruction=_("In this test you will hear two tones"),
        steps=[
            Step(
                _(
                    "It's your job to decide if the second tone is %(first_condition)s or %(second_condition)s  than the second tone"
                )
                % {
                    "first_condition": self.first_condition_i18n,
                    "second_condition": self.second_condition_i18n,
                }
            ),
            Step(
                _(
                    "During the experiment it will become more difficult to hear the difference."
                )
            ),
            Step(
                _(
                    "Try to answer as accurately as possible, even if you're uncertain."
                )
            ),
            Step(
                _(
                    "This test will take around 4 minutes to complete. Try to stay focused for the entire test!"
                )
            ),
        ],
        button_label="Ok",
        step_numbers=True,
    )

get_next_trial(session) #

Provide the next trial action

Parameters:

- session (Session, required): the Session object, as supplied to the next_round function

Returns:

- Trial: Trial object

Source code in backend/experiment/rules/practice.py
def get_next_trial(self, session: Session) -> Trial:
    """
    Provide the next trial action

    Args:
        session: the Session object, as supplied to the `next_round` function

    Returns:
        Trial object
    """
    round_number = session.get_rounds_passed()
    condition = self.get_condition(session)
    try:
        section = session.playlist.get_section(
            {"group": "practice", "tag": condition}
        )
    except Section.DoesNotExist:
        raise
    expected_response = condition
    total_rounds = self.n_practice_rounds * math.ceil(
        round_number / self.n_practice_rounds
    )
    key = self.task_description.replace(" ", "_") + "_practice"
    question = ButtonArrayQuestion(
        text=_(
            "Is the second tone %(first_condition)s or %(second_condition)s than the first tone?"
        )
        % {
            "first_condition": self.first_condition_i18n,
            "second_condition": self.second_condition_i18n,
        },
        key=key,
        choices={
            self.first_condition: self.first_condition_i18n,
            self.second_condition: self.second_condition_i18n,
        },
        result_id=prepare_result(
            key,
            session,
            section=section,
            expected_response=expected_response,
            scoring_rule="CORRECTNESS",
        ),
    )
    playback = Autoplay([section])
    form = Form([question], submit_label="")
    return Trial(
        playback=playback,
        feedback_form=form,
        title=_(
            "%(task_description)s: Practice round %(round_number)d of %(total_rounds)d"
            % {
                "task_description": self.task_description,
                "round_number": round_number + 1,
                "total_rounds": total_rounds,
            }
        ),
        config={"listen_first": True, "response_time": section.duration + 0.1},
    )

get_practice_explainer() #

Override this method if you want to give extra information about the practice itself.

Returns:

- Explainer: Explainer object

Source code in backend/experiment/rules/practice.py
def get_practice_explainer(self) -> Explainer:
    """Override this method if you want to give extra information about the practice itself.

    Returns:
        Explainer object
    """
    return Explainer(
        instruction=_("We will now practice first."),
        steps=[
            Step(
                description=_(
                    "First you will hear %(n_practice_rounds)d practice trials."
                )
                % {"n_practice_rounds": self.n_practice_rounds}
            ),
        ],
        button_label=_("Begin experiment"),
    )

get_restart_explainer() #

Override this method if you want to adjust the feedback about why participants need to practice again.

Returns:

- Explainer: Explainer object

Source code in backend/experiment/rules/practice.py
def get_restart_explainer(self) -> Explainer:
    """Override this method if you want to adjust the feedback to why participants need to practice again.

    Returns:
        Explainer object
    """
    return Explainer(
        instruction=_(
            "You have answered %(n_correct)d or more practice trials incorrectly."
        )
        % {"n_correct": self.n_correct},
        steps=[
            Step(_("We will therefore practice again.")),
            Step(_("But first, you can read the instructions again.")),
        ],
        button_label=_("Continue"),
    )

next_practice_round(session) #

This method implements the logic for presenting explainers, practice rounds, and checking after the practice rounds if the participant was successful.

  • if so: proceed to the next stage of the experiment. session.json_data will contain {'practice_done': True}, which you can check for in your next_round logic.

  • if not: delete all results so far, and restart the practice.

You can call this method from your ruleset’s next_round function.

Parameters:

- session (Session, required): the Session object, as also supplied to next_round

Returns:

- list[Union[Trial, Explainer]]: list of Trial and/or Explainer objects

Source code in backend/experiment/rules/practice.py
def next_practice_round(self, session: Session) -> list[Union[Trial, Explainer]]:
    """This method implements the logic for presenting explainers, practice rounds,
    and checking after the practice rounds if the participant was successful.

    - if so: proceed to the next stage of the experiment. `session.json_data` will contain `{'practice_done': True}`, which you can check for in your `next_round` logic.

    - if not: delete all results so far, and restart the practice.

    You can call this method from your ruleset's `next_round` function.

    Arguments:
        session: the Session object, as also supplied to `next_round`

    Returns:
        list of Trial and/or Explainer objects
    """
    round_number = session.get_rounds_passed()
    if round_number == 0:
        return [
            self.get_intro_explainer(),
            self.get_practice_explainer(),
            self.get_next_trial(session),
        ]
    if round_number % self.n_practice_rounds == 0:
        if self.practice_successful(session):
            self.finalize_practice(session)
            return [
                self.get_feedback_explainer(session),
                self.get_continuation_explainer(),
            ]
        else:
            # generate feedback, then delete all results so far and start over
            feedback = self.get_feedback_explainer(session)
            session.result_set.all().delete()
            return [
                feedback,
                self.get_restart_explainer(),
                self.get_intro_explainer(),
                self.get_practice_explainer(),
                self.get_next_trial(session),
            ]
    else:
        return [
            self.get_feedback_explainer(session),
            self.get_next_trial(session),
        ]
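
A ruleset that continues past the practice phase typically branches on the practice_done flag in its next_round. A sketch, with the post-practice logic left hypothetical:

class MyRules(BaseRules, PracticeMixin):
    def next_round(self, session):
        if not session.json_data.get("practice_done"):
            return self.next_practice_round(session)
        # Practice passed: continue with the experiment proper (hypothetical).
        return [self.get_next_trial(session)]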

practice_successful(session) #

Checks if the practice was successful, i.e., that the participant gave at least n_correct correct responses.

Override this method if you need different logic.

Parameters:

- session (Session, required): the Session object, as supplied to the next_round method

Returns:

- bool: a boolean indicating whether or not the practice was successful

Source code in backend/experiment/rules/practice.py
def practice_successful(self, session: Session) -> bool:
    """Checks if the practice is correct, i.e., that at the participant gave at least `n_correct` correct responses.

    Override this method if you need different logic.

    Arguments:
        session: the Session object, as supplied to the `next_round` method

    Returns:
        a boolean indicating whether or not the practice was successful
    """
    results = session.last_n_results(n_results=self.n_practice_rounds)
    correct = sum(result.score for result in results)
    return correct >= self.n_correct
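
An override sketch with a stricter, hypothetical criterion: every trial in the last practice block must be answered correctly:

class MyRules(BaseRules, PracticeMixin):
    def practice_successful(self, session: Session) -> bool:
        results = session.last_n_results(n_results=self.n_practice_rounds)
        # Require a non-zero (correct) score on every practice trial.
        return bool(results) and all(result.score for result in results)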

backend.experiment.rules.staircasing#

register_turnpoint(session, last_result) #

Register a turnpoint:

- set a comment on the previous result to indicate the turnpoint
- increase final_score (used as a counter for turnpoints)

Source code in backend/experiment/rules/util/staircasing.py
def register_turnpoint(session, last_result):
    """ register turnpoint:
        - set comment on previous result to indicate turnpoint
        - increase final_score (used as counter for turnpoints) """
    last_result.comment += ': turnpoint'
    last_result.save()
    session.final_score += 1
    session.save()
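
A heavily simplified sketch of how a staircase ruleset might call this helper; the turnpoint check, the max_turnpoints stopping rule, and the action helpers are all hypothetical (see rules files such as duration_discrimination.py for the real logic):

def next_round(self, session):
    last_result = session.last_result()
    if last_result and self.is_turnpoint(session, last_result):  # hypothetical check
        register_turnpoint(session, last_result)
    if session.final_score >= self.max_turnpoints:  # hypothetical stopping rule
        return [self.get_final_action(session)]  # hypothetical final action
    return [self.get_next_trial(session)]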