From ab7d3b526b01fc16e170a013b9cbd6a27a0136b7 Mon Sep 17 00:00:00 2001
From: Nick Parlante <nick.parlante@cs.stanford.edu>
Date: Thu, 3 Oct 2013 10:41:24 -0700
Subject: [PATCH] Multiple-choice features

Features by Jeff Ericson and Nick Parlante squashed
down to one commit.

-shuffle of choices within a choicegroup (Nick)
-answer-pool subsetting within a choicegroup (Jeff)
-masking of choice names within a choicegroup (Nick),
 used by shuffle and answer-pool
-targeted-feedback within a multiplechoiceresponse (Jeff)
-delay-between-submissions capa feature (Jeff)
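
For reference, a rough sketch of problem XML exercising the new attributes,
squeezed into one illustrative problem. The attribute and tag names (shuffle,
fixed, answer-pool, targeted-feedback, explanation-id, targetedfeedbackset,
solutionset) come from the code in this patch; the choice text and explanation
ids are made up. Note that shuffle and answer-pool may not be used on the same
choicegroup.

    <problem>
      <multiplechoiceresponse>
        <!-- shuffle="true" permutes the choices per student; fixed="true" pins a choice in place -->
        <choicegroup type="MultipleChoice" shuffle="true">
          <choice correct="false">Apple</choice>
          <choice correct="false">Banana</choice>
          <choice correct="true">Donut</choice>
          <choice correct="false" fixed="true">None of the above</choice>
        </choicegroup>
      </multiplechoiceresponse>

      <multiplechoiceresponse>
        <!-- answer-pool="4" shows a random subset of the pool: always 1 correct + 3 incorrect -->
        <choicegroup type="MultipleChoice" answer-pool="4">
          <choice correct="false">wrong-1</choice>
          <choice correct="false">wrong-2</choice>
          <choice correct="true" explanation-id="solution1">correct-1</choice>
          <choice correct="false">wrong-3</choice>
          <choice correct="true" explanation-id="solution2">correct-2</choice>
        </choicegroup>
      </multiplechoiceresponse>
      <solutionset>
        <!-- only the solution matching the chosen correct choice is kept -->
        <solution explanation-id="solution1"><div class="detailed-solution"><p>Why correct-1 is right</p></div></solution>
        <solution explanation-id="solution2"><div class="detailed-solution"><p>Why correct-2 is right</p></div></solution>
      </solutionset>

      <multiplechoiceresponse targeted-feedback="">
        <!-- targeted-feedback keys choice-level feedback by explanation-id;
             use targeted-feedback="alwaysShowCorrectChoiceExplanation" to also
             surface the correct-choice explanation after submission -->
        <choicegroup type="MultipleChoice">
          <choice correct="false" explanation-id="feedback1">wrong-1</choice>
          <choice correct="false" explanation-id="feedback2">wrong-2</choice>
          <choice correct="true" explanation-id="solutionA">correct-1</choice>
        </choicegroup>
      </multiplechoiceresponse>
      <targetedfeedbackset>
        <targetedfeedback explanation-id="feedback1"><p>Why wrong-1 is wrong</p></targetedfeedback>
        <targetedfeedback explanation-id="feedback2"><p>Why wrong-2 is wrong</p></targetedfeedback>
      </targetedfeedbackset>
      <solutionset>
        <solution explanation-id="solutionA"><div class="detailed-solution"><p>Why correct-1 is right</p></div></solution>
      </solutionset>
    </problem>

The delay-between-submissions feature is configured separately, via the new
"Timer Between Attempts" advanced setting in the Studio problem editor
(default "0", i.e. no delay).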
---
 CHANGELOG.rst                                 |   3 +
 .../contentstore/features/problem-editor.py   |   3 +-
 common/lib/capa/capa/capa_problem.py          |  82 +++
 common/lib/capa/capa/customrender.py          |  40 ++
 common/lib/capa/capa/responsetypes.py         | 265 ++++++-
 common/lib/capa/capa/tests/__init__.py        |   4 +-
 .../lib/capa/capa/tests/test_answer_pool.py   | 651 ++++++++++++++++++
 common/lib/capa/capa/tests/test_shuffle.py    | 306 ++++++++
 .../capa/capa/tests/test_targeted_feedback.py | 613 +++++++++++++++++
 common/lib/xmodule/xmodule/capa_base.py       | 151 +++-
 .../lib/xmodule/xmodule/css/capa/display.scss |  45 ++
 .../xmodule/js/spec/problem/edit_spec.coffee  |  99 +++
 .../xmodule/js/src/problem/edit.coffee        |  46 +-
 .../xmodule/xmodule/tests/test_capa_module.py | 104 ++-
 .../tests/test_delay_between_attempts.py      | 306 ++++++++
 15 files changed, 2674 insertions(+), 44 deletions(-)
 create mode 100644 common/lib/capa/capa/tests/test_answer_pool.py
 create mode 100644 common/lib/capa/capa/tests/test_shuffle.py
 create mode 100644 common/lib/capa/capa/tests/test_targeted_feedback.py
 create mode 100644 common/lib/xmodule/xmodule/tests/test_delay_between_attempts.py

diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index 7f736932fd8..c788940854f 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -67,6 +67,9 @@ Blades: Add view for field type Dict in Studio. BLD-658.
 
 Blades: Refactor stub implementation of LTI Provider. BLD-601.
 
+LMS: multiple choice features: shuffle, answer-pool, targeted-feedback,
+choice name masking, submission timer
+
 Studio: Added ability to edit course short descriptions that appear on the course catalog page.
 
 LMS: In left accordion and progress page, due dates are now displayed in time
diff --git a/cms/djangoapps/contentstore/features/problem-editor.py b/cms/djangoapps/contentstore/features/problem-editor.py
index 9bada6f78cd..035fb0e5afc 100644
--- a/cms/djangoapps/contentstore/features/problem-editor.py
+++ b/cms/djangoapps/contentstore/features/problem-editor.py
@@ -13,7 +13,7 @@ MAXIMUM_ATTEMPTS = "Maximum Attempts"
 PROBLEM_WEIGHT = "Problem Weight"
 RANDOMIZATION = 'Randomization'
 SHOW_ANSWER = "Show Answer"
-
+TIMER_BETWEEN_ATTEMPTS = "Timer Between Attempts"
 
 @step('I have created a Blank Common Problem$')
 def i_created_blank_common_problem(step):
@@ -44,6 +44,7 @@ def i_see_advanced_settings_with_values(step):
             [PROBLEM_WEIGHT, "", False],
             [RANDOMIZATION, "Never", False],
             [SHOW_ANSWER, "Finished", False],
+            [TIMER_BETWEEN_ATTEMPTS, "0", False]
         ])
 
 
diff --git a/common/lib/capa/capa/capa_problem.py b/common/lib/capa/capa/capa_problem.py
index da51289effa..7d511a66245 100644
--- a/common/lib/capa/capa/capa_problem.py
+++ b/common/lib/capa/capa/capa_problem.py
@@ -178,6 +178,14 @@ class LoncapaProblem(object):
         #   input_id string -> InputType object
         self.inputs = {}
 
+        # Run response late_transforms last (see MultipleChoiceResponse)
+        # Sort the responses to be in *_1 *_2 ... order.
+        responses = self.responders.values()
+        responses = sorted(responses, key=lambda resp: int(resp.id[resp.id.rindex('_') + 1:]))
+        for response in responses:
+            if hasattr(response, 'late_transforms'):
+                response.late_transforms(self)
+
         self.extracted_tree = self._extract_html(self.tree)
 
     def do_reset(self):
@@ -419,10 +427,84 @@ class LoncapaProblem(object):
             answer_ids.append(results.keys())
         return answer_ids
 
+    def do_targeted_feedback(self, tree):
+        """
+        Implements the targeted-feedback attribute in-place on <multiplechoiceresponse> --
+        choice-level explanations shown to a student after submission.
+        Does nothing if there is no targeted-feedback attribute.
+        """
+        for mult_choice_response in tree.xpath('//multiplechoiceresponse[@targeted-feedback]'):
+            # Note that the modifications have been done, avoiding problems if called twice.
+            if hasattr(self, 'has_targeted'):
+                continue
+            self.has_targeted = True  # pylint: disable=W0201
+
+            show_explanation = mult_choice_response.get('targeted-feedback') == 'alwaysShowCorrectChoiceExplanation'
+
+            # Grab the first choicegroup (there should only be one within each <multiplechoiceresponse> tag)
+            choicegroup = mult_choice_response.xpath('./choicegroup[@type="MultipleChoice"]')[0]
+            choices_list = list(choicegroup.iter('choice'))
+
+            # Find the student answer key that matches our <choicegroup> id
+            student_answer = self.student_answers.get(choicegroup.get('id'))
+            expl_id_for_student_answer = None
+
+            # Keep track of the explanation-id that corresponds to the student's answer
+            # Also, keep track of the solution-id
+            solution_id = None
+            for choice in choices_list:
+                if choice.get('name') == student_answer:
+                    expl_id_for_student_answer = choice.get('explanation-id')
+                if choice.get('correct') == 'true':
+                    solution_id = choice.get('explanation-id')
+
+            # Filter out targetedfeedback that doesn't correspond to the answer the student selected
+            # Note: following-sibling will grab all following siblings, so we just want the first in the list
+            targetedfeedbackset = mult_choice_response.xpath('./following-sibling::targetedfeedbackset')
+            if len(targetedfeedbackset) != 0:
+                targetedfeedbackset = targetedfeedbackset[0]
+                targetedfeedbacks = targetedfeedbackset.xpath('./targetedfeedback')
+                for targetedfeedback in targetedfeedbacks:
+                    # Don't show targeted feedback if the student hasn't answered the problem
+                    # or if the targeted feedback doesn't match the student's (incorrect) answer
+                    if not self.done or targetedfeedback.get('explanation-id') != expl_id_for_student_answer:
+                        targetedfeedbackset.remove(targetedfeedback)
+
+            # Do not displace the solution under these circumstances
+            if not show_explanation or not self.done:
+                continue
+
+            # The next element should either be <solution> or <solutionset>
+            next_element = targetedfeedbackset.getnext()
+            parent_element = tree
+            solution_element = None
+            if next_element is not None and next_element.tag == 'solution':
+                solution_element = next_element
+            elif next_element is not None and next_element.tag == 'solutionset':
+                solutions = next_element.xpath('./solution')
+                for solution in solutions:
+                    if solution.get('explanation-id') == solution_id:
+                        parent_element = next_element
+                        solution_element = solution
+
+            # If we could not find the solution element, skip the remaining steps below
+            if solution_element is None:
+                continue
+
+            # Move our correct-choice explanation from the "solution explanation" area into
+            # the set of targeted feedback, so the explanation renders on the page
+            # without the student clicking "Show Answer" or seeing a checkmark next to the correct choice
+            parent_element.remove(solution_element)
+
+            # Add our solution instead to the targetedfeedbackset and change its tag name
+            solution_element.tag = 'targetedfeedback'
+            targetedfeedbackset.append(solution_element)
+
     def get_html(self):
         """
         Main method called externally to get the HTML to be rendered for this capa Problem.
         """
+        self.do_targeted_feedback(self.tree)
         html = contextualize_text(etree.tostring(self._extract_html(self.tree)), self.context)
         return html
 
diff --git a/common/lib/capa/capa/customrender.py b/common/lib/capa/capa/customrender.py
index 9203de9449c..24f8d9b9ec4 100644
--- a/common/lib/capa/capa/customrender.py
+++ b/common/lib/capa/capa/customrender.py
@@ -11,6 +11,7 @@ from .registry import TagRegistry
 import logging
 import re
 
+from cgi import escape as cgi_escape
 from lxml import etree
 import xml.sax.saxutils as saxutils
 from .registry import TagRegistry
@@ -98,3 +99,42 @@ class SolutionRenderer(object):
         return etree.XML(html)
 
 registry.register(SolutionRenderer)
+
+#-----------------------------------------------------------------------------
+
+
+class TargetedFeedbackRenderer(object):
+    """
+    A targeted feedback is rendered as a <section> wrapping a <span>, used to display an
+    extended piece of feedback to students who answered a question incorrectly.
+    """
+    tags = ['targetedfeedback']
+
+    def __init__(self, system, xml):
+        self.system = system
+        self.xml = xml
+
+    def get_html(self):
+        """
+        Return the contents of this tag, rendered to html, as an etree element.
+        """
+        html = '<section class="targeted-feedback-span"><span>{}</span></section>'.format(etree.tostring(self.xml))
+        try:
+            xhtml = etree.XML(html)
+        except Exception as err:  # pylint: disable=broad-except
+            if self.system.DEBUG:
+                msg = """
+                    <html>
+                      <div class="inline-error">
+                        <p>Error {err}</p>
+                        <p>Failed to construct targeted feedback from <pre>{html}</pre></p>
+                      </div>
+                    </html>
+                """.format(err=cgi_escape(err), html=cgi_escape(html))
+                log.error(msg)
+                return etree.XML(msg)
+            else:
+                raise
+        return xhtml
+
+registry.register(TargetedFeedbackRenderer)
diff --git a/common/lib/capa/capa/responsetypes.py b/common/lib/capa/capa/responsetypes.py
index bcf9c78cfb8..eacde78f60f 100644
--- a/common/lib/capa/capa/responsetypes.py
+++ b/common/lib/capa/capa/responsetypes.py
@@ -414,6 +414,19 @@ class LoncapaResponse(object):
 
         return response_msg_div
 
+    # These accessor functions allow polymorphic checking of response
+    # objects without having to call hasattr() directly.
+    def has_mask(self):
+        """True if the response has masking."""
+        return hasattr(self, '_has_mask')
+
+    def has_shuffle(self):
+        """True if the response has a shuffle transformation."""
+        return hasattr(self, '_has_shuffle')
+
+    def has_answerpool(self):
+        """True if the response has an answer-pool transformation."""
+        return hasattr(self, '_has_answerpool')
 
 #-----------------------------------------------------------------------------
 
@@ -718,6 +731,22 @@ class ChoiceResponse(LoncapaResponse):
 
 @registry.register
 class MultipleChoiceResponse(LoncapaResponse):
+    """
+    Multiple Choice Response
+    The shuffle and answer-pool features on this class enable permuting and
+    subsetting the choices shown to the student.
+    Both features enable name "masking":
+    With masking, the regular names of multiplechoice choices
+    choice_0 choice_1 ... are not used. Instead we use random masked names
+    mask_2 mask_0 ... so that a view-source of the names reveals nothing about
+    the original order. We introduce the masked names right at init time, so the
+    whole software stack works with just the one system of naming.
+    The .has_mask() test on a response checks for masking, implemented by a
+    ._has_mask attribute on the response object.
+    The logging functionality in capa_base calls the unmask functions here
+    to translate back to choice_0 name style for recording in the logs, so
+    the logging is in terms of the regular names.
+    """
     # TODO: handle direction and randomize
 
     tags = ['multiplechoiceresponse']
@@ -745,19 +774,53 @@ class MultipleChoiceResponse(LoncapaResponse):
     def mc_setup_response(self):
         """
         Initialize name attributes in <choice> stanzas in the <choicegroup> in this response.
+        Masks the choice names if applicable.
         """
         i = 0
         for response in self.xml.xpath("choicegroup"):
+            # Is masking enabled? -- check for the shuffle or answer-pool features
+            ans_str = response.get("answer-pool")
+            if response.get("shuffle") == "true" or (ans_str is not None and ans_str != "0"):
+                self._has_mask = True  # pylint: disable=W0201
+                self._mask_dict = {}   # pylint: disable=W0201
+                # We do not want the random mask names to be the same
+                # for all responses in a problem (sharing the one seed),
+                # e.g. mask_2 in view-source always turning out to be the correct choice.
+                # But it must be repeatable and a function of the seed.
+                # Therefore we add the _1 number from the .id to the seed.
+                seed_delta = int(self.id[self.id.rindex("_") + 1:])
+                rng = random.Random(self.context["seed"] + seed_delta)
+                # e.g. mask_ids = [3, 1, 0, 2]
+                mask_ids = range(len(response))
+                rng.shuffle(mask_ids)
             rtype = response.get('type')
             if rtype not in ["MultipleChoice"]:
                 # force choicegroup to be MultipleChoice if not valid
                 response.set("type", "MultipleChoice")
             for choice in list(response):
-                if choice.get("name") is None:
-                    choice.set("name", "choice_" + str(i))
+                # The regular, non-masked name:
+                if choice.get("name") is not None:
+                    name = "choice_" + choice.get("name")
+                else:
+                    name = "choice_" + str(i)
                     i += 1
+                # If using the masked name, e.g. mask_0, save the regular name
+                # to support unmasking later (for the logs).
+                if self.has_mask():
+                    mask_name = "mask_" + str(mask_ids.pop())
+                    self._mask_dict[mask_name] = name
+                    choice.set("name", mask_name)
                 else:
-                    choice.set("name", "choice_" + choice.get("name"))
+                    choice.set("name", name)
+
+    def late_transforms(self, problem):
+        """
+        Rearrangements run late in the problem __init__ process.
+        These cannot be done at response init time, because the
+        enclosing problem is not fully constructed at that point.
+        """
+        self.do_shuffle(self.xml, problem)
+        self.do_answer_pool(self.xml, problem)
 
     def get_score(self, student_answers):
         """
@@ -774,6 +837,202 @@ class MultipleChoiceResponse(LoncapaResponse):
     def get_answers(self):
         return {self.answer_id: self.correct_choices}
 
+    def unmask_name(self, name):
+        """
+        Given a masked name, e.g. mask_2, returns the regular name, e.g. choice_0.
+        Fails with LoncapaProblemError if called on a response that is not masking.
+        """
+        if not self.has_mask():
+            raise LoncapaProblemError("unmask_name called on response that is not masked")
+        return self._mask_dict[name]
+
+    def unmask_order(self):
+        """
+        Returns a list of the choice names in the order displayed to the user,
+        using the regular (non-masked) names.
+        Fails with LoncapaProblemError if called on a response that is not masking.
+        """
+        choices = self.xml.xpath('choicegroup/choice')
+        # We let the unmask_name() raise the error for us if this response is not masking.
+        return [self.unmask_name(choice.get("name")) for choice in choices]
+
+    def do_shuffle(self, tree, problem):
+        """
+        For a choicegroup with shuffle="true", shuffles the choices in-place in the given tree
+        based on the seed. Otherwise does nothing.
+        Raises LoncapaProblemError if both shuffle and answer-pool are active:
+        a problem should use one or the other but not both.
+        Does nothing if the tree has already been processed.
+        """
+        # The tree is already pared down to this <multiplechoiceresponse> so this query just
+        # gets the child choicegroup (i.e. no leading //)
+        choicegroups = tree.xpath('choicegroup[@shuffle="true"]')
+        if choicegroups:
+            choicegroup = choicegroups[0]
+            if choicegroup.get('answer-pool') is not None:
+                raise LoncapaProblemError("Do not use shuffle and answer-pool at the same time")
+            # Note in the response that shuffling is done.
+            # Both to avoid double-processing, and to feed the logs.
+            if self.has_shuffle():
+                return
+            self._has_shuffle = True  # pylint: disable=W0201
+            # Move elements from tree to list for shuffling, then put them back.
+            ordering = list(choicegroup.getchildren())
+            for choice in ordering:
+                choicegroup.remove(choice)
+            ordering = self.shuffle_choices(ordering, self.get_rng(problem))
+            for choice in ordering:
+                choicegroup.append(choice)
+
+    def shuffle_choices(self, choices, rng):
+        """
+        Returns a list of choice nodes with the shuffling done,
+        using the provided random number generator.
+        Choices with 'fixed'='true' are held back from the shuffle.
+        """
+        # Separate out a list of the stuff to be shuffled
+        # vs. the head/tail of fixed==true choices to be held back from the shuffle.
+        # Rare corner case: A fixed==true choice "island" in the middle is lumped in
+        # with the tail group of fixed choices.
+        # Slightly tricky one-pass implementation using a state machine
+        head = []
+        middle = []  # only this one gets shuffled
+        tail = []
+        at_head = True
+        for choice in choices:
+            if at_head and choice.get('fixed') == 'true':
+                head.append(choice)
+                continue
+            at_head = False
+            if choice.get('fixed') == 'true':
+                tail.append(choice)
+            else:
+                middle.append(choice)
+        rng.shuffle(middle)
+        return head + middle + tail
+
+    def get_rng(self, problem):
+        """
+        Get the random number generator to be shared by responses
+        of the problem, creating it on the problem if needed.
+        """
+        # Multiple questions in a problem share one random number generator (rng) object
+        # stored on the problem. If each question got its own rng, the structure of multiple
+        # questions within a problem could appear predictable to the student,
+        # e.g. (c) keeps being the correct choice. This is due to the seed being
+        # defined at the problem level, so the multiple rng's would be seeded the same.
+        # The name _shared_rng begins with an _ to suggest that it is not a facility
+        # for general use.
+        # pylint: disable=protected-access
+        if not hasattr(problem, '_shared_rng'):
+            problem._shared_rng = random.Random(self.context['seed'])
+        return problem._shared_rng
+
+    def do_answer_pool(self, tree, problem):
+        """
+        Implements the answer-pool subsetting operation in-place on the tree.
+        Allows problem questions to draw from a pool of answers: the options shown to the student
+        are randomly selected so that there is always 1 correct answer and n-1 incorrect answers,
+        where the author specifies n as the value of the "answer-pool" attribute on <choicegroup>.
+
+        The <choicegroup> tag must have an attribute 'answer-pool' giving the desired
+        pool size. If that attribute is zero or not present, no operation is performed.
+        Calling this a second time does nothing.
+        Raises LoncapaProblemError if the answer-pool value is not an integer,
+        or if the number of correct or incorrect choices available is zero.
+        """
+        choicegroups = tree.xpath("choicegroup[@answer-pool]")
+        if choicegroups:
+            choicegroup = choicegroups[0]
+            num_str = choicegroup.get('answer-pool')
+            if num_str == '0':
+                return
+            try:
+                num_choices = int(num_str)
+            except ValueError:
+                raise LoncapaProblemError("answer-pool value should be an integer")
+
+            # Note in the response that answerpool is done.
+            # Both to avoid double-processing, and to feed the logs.
+            if self.has_answerpool():
+                return
+            self._has_answerpool = True  # pylint: disable=W0201
+
+            choices_list = list(choicegroup.getchildren())
+
+            # Remove all choices in the choices_list (we will add some back in later)
+            for choice in choices_list:
+                choicegroup.remove(choice)
+
+            rng = self.get_rng(problem)  # random number generator to use
+            # Sample from the answer pool to get the subset choices and solution id
+            (solution_id, subset_choices) = self.sample_from_answer_pool(choices_list, rng, num_choices)
+
+            # Add back in randomly selected choices
+            for choice in subset_choices:
+                choicegroup.append(choice)
+
+            # Filter out solutions that don't correspond to the correct answer we selected to show
+            # Note that this means that if the user simply provides a <solution> tag, nothing is filtered
+            solutionset = choicegroup.xpath('../following-sibling::solutionset')
+            if len(solutionset) != 0:
+                solutionset = solutionset[0]
+                solutions = solutionset.xpath('./solution')
+                for solution in solutions:
+                    if solution.get('explanation-id') != solution_id:
+                        solutionset.remove(solution)
+
+    def sample_from_answer_pool(self, choices, rng, num_pool):
+        """
+        Takes in:
+            1. list of choices
+            2. random number generator
+            3. the requested size "answer-pool" number, in effect a max
+
+        Returns a tuple with 2 items:
+            1. the solution_id corresponding with the chosen correct answer
+            2. (subset) list of choice nodes with num_pool-1 incorrect and 1 correct
+
+        Raises an error if the number of correct or incorrect choices is 0.
+        """
+
+        correct_choices = []
+        incorrect_choices = []
+
+        for choice in choices:
+            if choice.get('correct') == 'true':
+                correct_choices.append(choice)
+            else:
+                incorrect_choices.append(choice)
+                # In my small test, capa seems to treat the absence of any correct=
+                # attribute as equivalent to ="false", so that's what we do here.
+
+        # We raise an error if the problem is highly ill-formed.
+        # There must be at least one correct and one incorrect choice.
+        # IDEA: perhaps this sort of semantic-lint constraint should be generalized to all multiplechoice
+        # not just down in this corner when answer-pool is used.
+        # Or perhaps in the overall author workflow, these errors are unhelpful and
+        # should all be removed.
+        if len(correct_choices) < 1 or len(incorrect_choices) < 1:
+            raise LoncapaProblemError("Choicegroup must include at least 1 correct and 1 incorrect choice")
+
+        # Limit the number of incorrect choices to what we actually have
+        num_incorrect = num_pool - 1
+        num_incorrect = min(num_incorrect, len(incorrect_choices))
+
+        # Select the one correct choice
+        index = rng.randint(0, len(correct_choices) - 1)
+        correct_choice = correct_choices[index]
+        solution_id = correct_choice.get('explanation-id')
+
+        # Put together the result, pushing most of the work onto rng.shuffle()
+        subset_choices = [correct_choice]
+        rng.shuffle(incorrect_choices)
+        subset_choices += incorrect_choices[:num_incorrect]
+        rng.shuffle(subset_choices)
+
+        return (solution_id, subset_choices)
+
 
 @registry.register
 class TrueFalseResponse(MultipleChoiceResponse):
diff --git a/common/lib/capa/capa/tests/__init__.py b/common/lib/capa/capa/tests/__init__.py
index d5d3f87f819..a8a867724ad 100644
--- a/common/lib/capa/capa/tests/__init__.py
+++ b/common/lib/capa/capa/tests/__init__.py
@@ -52,6 +52,6 @@ def test_capa_system():
     return the_system
 
 
-def new_loncapa_problem(xml, capa_system=None):
+def new_loncapa_problem(xml, capa_system=None, seed=723):
     """Construct a `LoncapaProblem` suitable for unit tests."""
-    return LoncapaProblem(xml, id='1', seed=723, capa_system=capa_system or test_capa_system())
+    return LoncapaProblem(xml, id='1', seed=seed, capa_system=capa_system or test_capa_system())
diff --git a/common/lib/capa/capa/tests/test_answer_pool.py b/common/lib/capa/capa/tests/test_answer_pool.py
new file mode 100644
index 00000000000..e55b15ad7d6
--- /dev/null
+++ b/common/lib/capa/capa/tests/test_answer_pool.py
@@ -0,0 +1,651 @@
+"""
+Tests the logic of the "answer-pool" attribute, e.g.
+  <choicegroup answer-pool="4">
+"""
+
+import unittest
+import textwrap
+from . import test_capa_system, new_loncapa_problem
+from capa.responsetypes import LoncapaProblemError
+
+
+class CapaAnswerPoolTest(unittest.TestCase):
+    """Capa Answer Pool Test"""
+    def setUp(self):
+        super(CapaAnswerPoolTest, self).setUp()
+        self.system = test_capa_system()
+
+    # XML problem setup used by a few tests.
+    common_question_xml = textwrap.dedent("""
+        <problem>
+
+        <p>What is the correct answer?</p>
+        <multiplechoiceresponse>
+          <choicegroup type="MultipleChoice" answer-pool="4">
+            <choice correct="false">wrong-1</choice>
+            <choice correct="false">wrong-2</choice>
+            <choice correct="true" explanation-id="solution1">correct-1</choice>
+            <choice correct="false">wrong-3</choice>
+            <choice correct="false">wrong-4</choice>
+            <choice correct="true" explanation-id="solution2">correct-2</choice>
+          </choicegroup>
+        </multiplechoiceresponse>
+
+        <solutionset>
+            <solution explanation-id="solution1">
+            <div class="detailed-solution">
+                <p>Explanation</p>
+                <p>This is the 1st solution</p>
+                <p>Not much to explain here, sorry!</p>
+            </div>
+            </solution>
+
+            <solution explanation-id="solution2">
+            <div class="detailed-solution">
+                <p>Explanation</p>
+                <p>This is the 2nd solution</p>
+            </div>
+            </solution>
+        </solutionset>
+    </problem>
+    """)
+
+    def test_answer_pool_4_choices_1_multiplechoiceresponse_seed1(self):
+        problem = new_loncapa_problem(self.common_question_xml, seed=723)
+        the_html = problem.get_html()
+        # [('choice_3', u'wrong-3'), ('choice_5', u'correct-2'), ('choice_1', u'wrong-2'), ('choice_4', u'wrong-4')]
+        self.assertRegexpMatches(the_html, r"<div>.*\[.*'wrong-3'.*'correct-2'.*'wrong-2'.*'wrong-4'.*\].*</div>")
+        self.assertRegexpMatches(the_html, r"<div>\{.*'1_solution_2'.*\}</div>")
+        self.assertEqual(the_html, problem.get_html(), 'should be able to call get_html() twice')
+        # Check about masking
+        response = problem.responders.values()[0]
+        self.assertTrue(response.has_mask())
+        self.assertTrue(response.has_answerpool())
+        self.assertEqual(response.unmask_order(), ['choice_3', 'choice_5', 'choice_1', 'choice_4'])
+
+    def test_answer_pool_4_choices_1_multiplechoiceresponse_seed2(self):
+        problem = new_loncapa_problem(self.common_question_xml, seed=9)
+        the_html = problem.get_html()
+        # [('choice_0', u'wrong-1'), ('choice_4', u'wrong-4'), ('choice_3', u'wrong-3'), ('choice_2', u'correct-1')]
+        self.assertRegexpMatches(the_html, r"<div>.*\[.*'wrong-1'.*'wrong-4'.*'wrong-3'.*'correct-1'.*\].*</div>")
+        self.assertRegexpMatches(the_html, r"<div>\{.*'1_solution_1'.*\}</div>")
+        # Check about masking
+        response = problem.responders.values()[0]
+        self.assertTrue(response.has_mask())
+        self.assertTrue(response.has_answerpool())
+        self.assertEqual(response.unmask_order(), ['choice_0', 'choice_4', 'choice_3', 'choice_2'])
+
+    def test_no_answer_pool_4_choices_1_multiplechoiceresponse(self):
+        xml_str = textwrap.dedent("""
+            <problem>
+
+            <p>What is the correct answer?</p>
+            <multiplechoiceresponse>
+              <choicegroup type="MultipleChoice">
+                <choice correct="false">wrong-1</choice>
+                <choice correct="false">wrong-2</choice>
+                <choice correct="true" explanation-id="solution1">correct-1</choice>
+                <choice correct="false">wrong-3</choice>
+                <choice correct="false">wrong-4</choice>
+                <choice correct="true" explanation-id="solution2">correct-2</choice>
+              </choicegroup>
+            </multiplechoiceresponse>
+
+            <solutionset>
+                <solution explanation-id="solution1">
+                <div class="detailed-solution">
+                    <p>Explanation</p>
+                    <p>This is the 1st solution</p>
+                    <p>Not much to explain here, sorry!</p>
+                </div>
+                </solution>
+
+                <solution explanation-id="solution2">
+                <div class="detailed-solution">
+                    <p>Explanation</p>
+                    <p>This is the 2nd solution</p>
+                </div>
+                </solution>
+            </solutionset>
+
+        </problem>
+        """)
+
+        problem = new_loncapa_problem(xml_str)
+        the_html = problem.get_html()
+        self.assertRegexpMatches(the_html, r"<div>.*\[.*'wrong-1'.*'wrong-2'.*'correct-1'.*'wrong-3'.*'wrong-4'.*'correct-2'.*\].*</div>")
+        self.assertRegexpMatches(the_html, r"<div>\{.*'1_solution_1'.*'1_solution_2'.*\}</div>")
+        self.assertEqual(the_html, problem.get_html(), 'should be able to call get_html() twice')
+        # Check about masking
+        response = problem.responders.values()[0]
+        self.assertFalse(response.has_mask())
+        self.assertFalse(response.has_answerpool())
+
+    def test_0_answer_pool_4_choices_1_multiplechoiceresponse(self):
+        xml_str = textwrap.dedent("""
+            <problem>
+
+            <p>What is the correct answer?</p>
+            <multiplechoiceresponse>
+              <choicegroup type="MultipleChoice" answer-pool="0">
+                <choice correct="false">wrong-1</choice>
+                <choice correct="false">wrong-2</choice>
+                <choice correct="true" explanation-id="solution1">correct-1</choice>
+                <choice correct="false">wrong-3</choice>
+                <choice correct="false">wrong-4</choice>
+                <choice correct="true" explanation-id="solution2">correct-2</choice>
+              </choicegroup>
+            </multiplechoiceresponse>
+
+            <solutionset>
+                <solution explanation-id="solution1">
+                <div class="detailed-solution">
+                    <p>Explanation</p>
+                    <p>This is the 1st solution</p>
+                    <p>Not much to explain here, sorry!</p>
+                </div>
+                </solution>
+
+                <solution explanation-id="solution2">
+                <div class="detailed-solution">
+                    <p>Explanation</p>
+                    <p>This is the 2nd solution</p>
+                </div>
+                </solution>
+            </solutionset>
+
+        </problem>
+        """)
+
+        problem = new_loncapa_problem(xml_str)
+        the_html = problem.get_html()
+        self.assertRegexpMatches(the_html, r"<div>.*\[.*'wrong-1'.*'wrong-2'.*'correct-1'.*'wrong-3'.*'wrong-4'.*'correct-2'.*\].*</div>")
+        self.assertRegexpMatches(the_html, r"<div>\{.*'1_solution_1'.*'1_solution_2'.*\}</div>")
+        response = problem.responders.values()[0]
+        self.assertFalse(response.has_mask())
+        self.assertFalse(response.has_answerpool())
+
+    def test_invalid_answer_pool_value(self):
+        xml_str = textwrap.dedent("""
+            <problem>
+
+            <p>What is the correct answer?</p>
+            <multiplechoiceresponse>
+              <choicegroup type="MultipleChoice" answer-pool="2.3">
+                <choice correct="false">wrong-1</choice>
+                <choice correct="false">wrong-2</choice>
+                <choice correct="true" explanation-id="solution1">correct-1</choice>
+                <choice correct="false">wrong-3</choice>
+                <choice correct="false">wrong-4</choice>
+                <choice correct="true" explanation-id="solution2">correct-2</choice>
+              </choicegroup>
+            </multiplechoiceresponse>
+
+            <solutionset>
+                <solution explanation-id="solution1">
+                <div class="detailed-solution">
+                    <p>Explanation</p>
+                    <p>This is the 1st solution</p>
+                    <p>Not much to explain here, sorry!</p>
+                </div>
+                </solution>
+
+                <solution explanation-id="solution2">
+                <div class="detailed-solution">
+                    <p>Explanation</p>
+                    <p>This is the 2nd solution</p>
+                </div>
+                </solution>
+            </solutionset>
+
+        </problem>
+        """)
+
+        with self.assertRaisesRegexp(LoncapaProblemError, "answer-pool"):
+            new_loncapa_problem(xml_str)
+
+    def test_invalid_answer_pool_none_correct(self):
+        xml_str = textwrap.dedent("""
+            <problem>
+            <p>What is the correct answer?</p>
+            <multiplechoiceresponse>
+              <choicegroup type="MultipleChoice" answer-pool="4">
+                <choice correct="false">wrong-1</choice>
+                <choice correct="false">wrong-2</choice>
+                <choice correct="false">wrong!!</choice>
+                <choice correct="false">wrong-3</choice>
+                <choice correct="false">wrong-4</choice>
+              </choicegroup>
+            </multiplechoiceresponse>
+        </problem>
+        """)
+        with self.assertRaisesRegexp(LoncapaProblemError, "1 correct.*1 incorrect"):
+            new_loncapa_problem(xml_str)
+
+    def test_invalid_answer_pool_all_correct(self):
+        xml_str = textwrap.dedent("""
+            <problem>
+            <p>What is the correct answer?</p>
+            <multiplechoiceresponse>
+              <choicegroup type="MultipleChoice" answer-pool="4">
+                <choice correct="true">!wrong-1</choice>
+                <choice correct="true">!wrong-2</choice>
+                <choice correct="true">!wrong-3</choice>
+                <choice correct="true">!wrong-4</choice>
+              </choicegroup>
+            </multiplechoiceresponse>
+        </problem>
+        """)
+        with self.assertRaisesRegexp(LoncapaProblemError, "1 correct.*1 incorrect"):
+            new_loncapa_problem(xml_str)
+
+    def test_answer_pool_5_choices_1_multiplechoiceresponse_seed1(self):
+        xml_str = textwrap.dedent("""
+            <problem>
+
+            <p>What is the correct answer?</p>
+            <multiplechoiceresponse>
+              <choicegroup type="MultipleChoice" answer-pool="5">
+                <choice correct="false">wrong-1</choice>
+                <choice correct="false">wrong-2</choice>
+                <choice correct="true" explanation-id="solution1">correct-1</choice>
+                <choice correct="false">wrong-3</choice>
+                <choice correct="false">wrong-4</choice>
+                <choice correct="true" explanation-id="solution2">correct-2</choice>
+              </choicegroup>
+            </multiplechoiceresponse>
+
+            <solutionset>
+                <solution explanation-id="solution1">
+                <div class="detailed-solution">
+                    <p>Explanation</p>
+                    <p>This is the 1st solution</p>
+                    <p>Not much to explain here, sorry!</p>
+                </div>
+                </solution>
+
+                <solution explanation-id="solution2">
+                <div class="detailed-solution">
+                    <p>Explanation</p>
+                    <p>This is the 2nd solution</p>
+                </div>
+                </solution>
+            </solutionset>
+
+        </problem>
+        """)
+
+        problem = new_loncapa_problem(xml_str, seed=723)
+        the_html = problem.get_html()
+        self.assertRegexpMatches(the_html, r"<div>.*\[.*'correct-2'.*'wrong-1'.*'wrong-2'.*.*'wrong-3'.*'wrong-4'.*\].*</div>")
+        self.assertRegexpMatches(the_html, r"<div>\{.*'1_solution_2'.*\}</div>")
+        response = problem.responders.values()[0]
+        self.assertTrue(response.has_mask())
+        self.assertEqual(response.unmask_order(), ['choice_5', 'choice_0', 'choice_1', 'choice_3', 'choice_4'])
+
+    def test_answer_pool_2_multiplechoiceresponses_seed1(self):
+        xml_str = textwrap.dedent("""
+            <problem>
+
+            <p>What is the correct answer?</p>
+            <multiplechoiceresponse>
+              <choicegroup type="MultipleChoice" answer-pool="4">
+                <choice correct="false">wrong-1</choice>
+                <choice correct="false">wrong-2</choice>
+                <choice correct="true" explanation-id="solution1">correct-1</choice>
+                <choice correct="false">wrong-3</choice>
+                <choice correct="false">wrong-4</choice>
+                <choice correct="true" explanation-id="solution2">correct-2</choice>
+              </choicegroup>
+            </multiplechoiceresponse>
+
+            <solutionset>
+                <solution explanation-id="solution1">
+                <div class="detailed-solution">
+                    <p>Explanation</p>
+                    <p>This is the 1st solution</p>
+                    <p>Not much to explain here, sorry!</p>
+                </div>
+                </solution>
+
+                <solution explanation-id="solution2">
+                <div class="detailed-solution">
+                    <p>Explanation</p>
+                    <p>This is the 2nd solution</p>
+                </div>
+                </solution>
+            </solutionset>
+
+            <p>What is the correct answer?</p>
+            <multiplechoiceresponse>
+              <choicegroup type="MultipleChoice" answer-pool="3">
+                <choice correct="false">wrong-1</choice>
+                <choice correct="false">wrong-2</choice>
+                <choice correct="true" explanation-id="solution1">correct-1</choice>
+                <choice correct="false">wrong-3</choice>
+                <choice correct="false">wrong-4</choice>
+                <choice correct="true" explanation-id="solution2">correct-2</choice>
+              </choicegroup>
+            </multiplechoiceresponse>
+
+            <solutionset>
+                <solution explanation-id="solution1">
+                <div class="detailed-solution">
+                    <p>Explanation</p>
+                    <p>This is the 1st solution</p>
+                    <p>Not much to explain here, sorry!</p>
+                </div>
+                </solution>
+
+                <solution explanation-id="solution2">
+                <div class="detailed-solution">
+                    <p>Explanation</p>
+                    <p>This is the 2nd solution</p>
+                </div>
+                </solution>
+            </solutionset>
+
+        </problem>
+        """)
+        problem = new_loncapa_problem(xml_str)
+        the_html = problem.get_html()
+
+        str1 = r"<div>.*\[.*'wrong-3'.*'correct-2'.*'wrong-2'.*'wrong-4'.*\].*</div>"
+        str2 = r"<div>.*\[.*'wrong-2'.*'wrong-1'.*'correct-2'.*\].*</div>"    # rng shared
+        # str2 = r"<div>.*\[.*'correct-2'.*'wrong-2'.*'wrong-3'.*\].*</div>"  # rng independent
+
+        str3 = r"<div>\{.*'1_solution_2'.*\}</div>"
+        str4 = r"<div>\{.*'1_solution_4'.*\}</div>"
+
+        self.assertRegexpMatches(the_html, str1)
+        self.assertRegexpMatches(the_html, str2)
+        self.assertRegexpMatches(the_html, str3)
+        self.assertRegexpMatches(the_html, str4)
+
+        without_new_lines = the_html.replace("\n", "")
+
+        self.assertRegexpMatches(without_new_lines, str1 + r".*" + str2)
+        self.assertRegexpMatches(without_new_lines, str3 + r".*" + str4)
+
+    def test_answer_pool_2_multiplechoiceresponses_seed2(self):
+        xml_str = textwrap.dedent("""
+            <problem>
+
+            <p>What is the correct answer?</p>
+            <multiplechoiceresponse>
+              <choicegroup type="MultipleChoice" answer-pool="3">
+                <choice correct="false">wrong-1</choice>
+                <choice correct="false">wrong-2</choice>
+                <choice correct="true" explanation-id="solution1">correct-1</choice>
+                <choice correct="false">wrong-3</choice>
+                <choice correct="false">wrong-4</choice>
+                <choice correct="true" explanation-id="solution2">correct-2</choice>
+              </choicegroup>
+            </multiplechoiceresponse>
+
+            <solutionset>
+                <solution explanation-id="solution1">
+                <div class="detailed-solution">
+                    <p>Explanation</p>
+                    <p>This is the 1st solution</p>
+                    <p>Not much to explain here, sorry!</p>
+                </div>
+                </solution>
+
+                <solution explanation-id="solution2">
+                <div class="detailed-solution">
+                    <p>Explanation</p>
+                    <p>This is the 2nd solution</p>
+                </div>
+                </solution>
+            </solutionset>
+
+            <p>What is the correct answer?</p>
+            <multiplechoiceresponse>
+              <choicegroup type="MultipleChoice" answer-pool="4">
+                <choice correct="false">wrong-1</choice>
+                <choice correct="false">wrong-2</choice>
+                <choice correct="true" explanation-id="solution1">correct-1</choice>
+                <choice correct="false">wrong-3</choice>
+                <choice correct="false">wrong-4</choice>
+                <choice correct="true" explanation-id="solution2">correct-2</choice>
+              </choicegroup>
+            </multiplechoiceresponse>
+
+            <solutionset>
+                <solution explanation-id="solution1">
+                <div class="detailed-solution">
+                    <p>Explanation</p>
+                    <p>This is the 1st solution</p>
+                    <p>Not much to explain here, sorry!</p>
+                </div>
+                </solution>
+
+                <solution explanation-id="solution2">
+                <div class="detailed-solution">
+                    <p>Explanation</p>
+                    <p>This is the 2nd solution</p>
+                </div>
+                </solution>
+            </solutionset>
+
+        </problem>
+        """)
+        problem = new_loncapa_problem(xml_str, seed=9)
+        the_html = problem.get_html()
+
+        str1 = r"<div>.*\[.*'wrong-4'.*'wrong-3'.*'correct-1'.*\].*</div>"
+        str2 = r"<div>.*\[.*'wrong-2'.*'wrong-3'.*'wrong-4'.*'correct-2'.*\].*</div>"
+        str3 = r"<div>\{.*'1_solution_1'.*\}</div>"
+        str4 = r"<div>\{.*'1_solution_4'.*\}</div>"
+
+        self.assertRegexpMatches(the_html, str1)
+        self.assertRegexpMatches(the_html, str2)
+        self.assertRegexpMatches(the_html, str3)
+        self.assertRegexpMatches(the_html, str4)
+
+        without_new_lines = the_html.replace("\n", "")
+
+        self.assertRegexpMatches(without_new_lines, str1 + r".*" + str2)
+        self.assertRegexpMatches(without_new_lines, str3 + r".*" + str4)
+
+    def test_answer_pool_random_consistent(self):
+        """
+        The point of this test is to make sure that the exact randomization
+        per seed does not change.
+        """
+        xml_str = textwrap.dedent("""
+            <problem>
+            <multiplechoiceresponse>
+              <choicegroup type="MultipleChoice" answer-pool="2">
+                <choice correct="false">wrong-1</choice>
+                <choice correct="false">wrong-2</choice>
+                <choice correct="true">correct-1</choice>
+                <choice correct="false">wrong-3</choice>
+                <choice correct="false">wrong-4</choice>
+                <choice correct="true">correct-2</choice>
+                <choice correct="true">correct-3</choice>
+              </choicegroup>
+            </multiplechoiceresponse>
+
+            <multiplechoiceresponse>
+              <choicegroup type="MultipleChoice" answer-pool="3">
+                <choice correct="false">wrong-1</choice>
+                <choice correct="false">wrong-2</choice>
+                <choice correct="true">correct-1</choice>
+                <choice correct="false">wrong-3</choice>
+                <choice correct="false">wrong-4</choice>
+                <choice correct="true">correct-2</choice>
+                <choice correct="true">correct-3</choice>
+              </choicegroup>
+            </multiplechoiceresponse>
+
+            <multiplechoiceresponse>
+              <choicegroup type="MultipleChoice" answer-pool="2">
+                <choice correct="false">wrong-1</choice>
+                <choice correct="false">wrong-2</choice>
+                <choice correct="true">correct-1</choice>
+                <choice correct="false">wrong-3</choice>
+                <choice correct="false">wrong-4</choice>
+                <choice correct="true">correct-2</choice>
+                <choice correct="true">correct-3</choice>
+              </choicegroup>
+            </multiplechoiceresponse>
+
+            <multiplechoiceresponse>
+              <choicegroup type="MultipleChoice" answer-pool="3">
+                <choice correct="false">wrong-1</choice>
+                <choice correct="false">wrong-2</choice>
+                <choice correct="true">correct-1</choice>
+                <choice correct="false">wrong-3</choice>
+                <choice correct="false">wrong-4</choice>
+                <choice correct="true">correct-2</choice>
+                <choice correct="true">correct-3</choice>
+              </choicegroup>
+            </multiplechoiceresponse>
+        </problem>
+        """)
+
+        problem = new_loncapa_problem(xml_str)
+        the_html = problem.get_html()
+        str1 = (r"<div>.*\[.*'correct-2'.*'wrong-2'.*\].*</div>.*" +
+                r"<div>.*\[.*'wrong-1'.*'correct-2'.*'wrong-4'.*\].*</div>.*" +
+                r"<div>.*\[.*'correct-1'.*'wrong-4'.*\].*</div>.*" +
+                r"<div>.*\[.*'wrong-1'.*'wrong-2'.*'correct-1'.*\].*</div>")
+        without_new_lines = the_html.replace("\n", "")
+        self.assertRegexpMatches(without_new_lines, str1)
+
+    def test_no_answer_pool(self):
+        xml_str = textwrap.dedent("""
+            <problem>
+
+            <p>What is the correct answer?</p>
+            <multiplechoiceresponse>
+              <choicegroup type="MultipleChoice">
+                <choice correct="false">wrong-1</choice>
+                <choice correct="false">wrong-2</choice>
+                <choice correct="true">correct-1</choice>
+                <choice correct="false">wrong-3</choice>
+                <choice correct="false">wrong-4</choice>
+              </choicegroup>
+            </multiplechoiceresponse>
+
+        </problem>
+        """)
+
+        problem = new_loncapa_problem(xml_str, seed=723)
+        the_html = problem.get_html()
+
+        str1 = r"<div>.*\[.*'wrong-1'.*'wrong-2'.*'correct-1'.*'wrong-3'.*'wrong-4'.*\].*</div>"
+
+        self.assertRegexpMatches(the_html, str1)
+        # attributes *not* present
+        response = problem.responders.values()[0]
+        self.assertFalse(response.has_mask())
+        self.assertFalse(response.has_answerpool())
+
+    def test_answer_pool_and_no_answer_pool(self):
+        xml_str = textwrap.dedent("""
+            <problem>
+
+            <p>What is the correct answer?</p>
+            <multiplechoiceresponse>
+              <choicegroup type="MultipleChoice">
+                <choice correct="false">wrong-1</choice>
+                <choice correct="false">wrong-2</choice>
+                <choice correct="true">correct-1</choice>
+                <choice correct="false">wrong-3</choice>
+                <choice correct="false">wrong-4</choice>
+              </choicegroup>
+            </multiplechoiceresponse>
+
+            <solution>
+            <div class="detailed-solution">
+                <p>Explanation</p>
+                <p>This is the solution</p>
+                <p>Not much to explain here, sorry!</p>
+            </div>
+            </solution>
+
+            <p>What is the correct answer?</p>
+            <multiplechoiceresponse>
+              <choicegroup type="MultipleChoice" answer-pool="4">
+                <choice correct="false">wrong-1</choice>
+                <choice correct="false">wrong-2</choice>
+                <choice correct="true" explanation-id="solution1">correct-1</choice>
+                <choice correct="false">wrong-3</choice>
+                <choice correct="false">wrong-4</choice>
+                <choice correct="true" explanation-id="solution2">correct-2</choice>
+              </choicegroup>
+            </multiplechoiceresponse>
+
+            <solutionset>
+                <solution explanation-id="solution1">
+                <div class="detailed-solution">
+                    <p>Explanation</p>
+                    <p>This is the 1st solution</p>
+                    <p>Not much to explain here, sorry!</p>
+                </div>
+                </solution>
+
+                <solution explanation-id="solution2">
+                <div class="detailed-solution">
+                    <p>Explanation</p>
+                    <p>This is the 2nd solution</p>
+                </div>
+                </solution>
+            </solutionset>
+
+        </problem>
+        """)
+
+        problem = new_loncapa_problem(xml_str, seed=723)
+        the_html = problem.get_html()
+
+        str1 = r"<div>.*\[.*'wrong-1'.*'wrong-2'.*'correct-1'.*'wrong-3'.*'wrong-4'.*\].*</div>"
+        str2 = r"<div>.*\[.*'wrong-3'.*'correct-2'.*'wrong-2'.*'wrong-4'.*\].*</div>"
+        str3 = r"<div>\{.*'1_solution_1'.*\}</div>"
+        str4 = r"<div>\{.*'1_solution_3'.*\}</div>"
+
+        self.assertRegexpMatches(the_html, str1)
+        self.assertRegexpMatches(the_html, str2)
+        self.assertRegexpMatches(the_html, str3)
+        self.assertRegexpMatches(the_html, str4)
+
+        without_new_lines = the_html.replace("\n", "")
+
+        self.assertRegexpMatches(without_new_lines, str1 + r".*" + str2)
+        self.assertRegexpMatches(without_new_lines, str3 + r".*" + str4)
+
+    def test_answer_pool_without_solutionset(self):
+        xml_str = textwrap.dedent("""
+            <problem>
+
+            <p>What is the correct answer?</p>
+            <multiplechoiceresponse>
+              <choicegroup type="MultipleChoice" answer-pool="4">
+                <choice correct="false">wrong-1</choice>
+                <choice correct="false">wrong-2</choice>
+                <choice correct="true">correct-1</choice>
+                <choice correct="false">wrong-3</choice>
+                <choice correct="false">wrong-4</choice>
+                <choice correct="true">correct-2</choice>
+              </choicegroup>
+            </multiplechoiceresponse>
+
+            <solution>
+            <div class="detailed-solution">
+                <p>Explanation</p>
+                <p>This is the solution</p>
+                <p>Not much to explain here, sorry!</p>
+            </div>
+            </solution>
+
+        </problem>
+        """)
+
+        problem = new_loncapa_problem(xml_str, seed=723)
+        the_html = problem.get_html()
+
+        self.assertRegexpMatches(the_html, r"<div>.*\[.*'wrong-3'.*'correct-2'.*'wrong-2'.*'wrong-4'.*\].*</div>")
+        self.assertRegexpMatches(the_html, r"<div>\{.*'1_solution_1'.*\}</div>")
diff --git a/common/lib/capa/capa/tests/test_shuffle.py b/common/lib/capa/capa/tests/test_shuffle.py
new file mode 100644
index 00000000000..4a55268a317
--- /dev/null
+++ b/common/lib/capa/capa/tests/test_shuffle.py
@@ -0,0 +1,306 @@
+"""Tests the capa shuffle and name-masking."""
+
+import unittest
+import textwrap
+
+from . import test_capa_system, new_loncapa_problem
+from capa.responsetypes import LoncapaProblemError
+
+
+class CapaShuffleTest(unittest.TestCase):
+    """Capa problem tests for shuffling and choice-name masking."""
+
+    def setUp(self):
+        super(CapaShuffleTest, self).setUp()
+        self.system = test_capa_system()
+
+    def test_shuffle_4_choices(self):
+        xml_str = textwrap.dedent("""
+            <problem>
+            <multiplechoiceresponse>
+              <choicegroup type="MultipleChoice" shuffle="true">
+                <choice correct="false">Apple</choice>
+                <choice correct="false">Banana</choice>
+                <choice correct="false">Chocolate</choice>
+                <choice correct ="true">Donut</choice>
+              </choicegroup>
+            </multiplechoiceresponse>
+            </problem>
+        """)
+        problem = new_loncapa_problem(xml_str, seed=0)
+        # shuffling 4 things with seed of 0 yields: B A C D
+        # Check that the choices are shuffled
+        the_html = problem.get_html()
+        self.assertRegexpMatches(the_html, r"<div>.*\[.*'Banana'.*'Apple'.*'Chocolate'.*'Donut'.*\].*</div>")
+        # Check that choice name masking is enabled and that unmasking works
+        response = problem.responders.values()[0]
+        self.assertTrue(response.has_mask())
+        self.assertEqual(response.unmask_order(), ['choice_1', 'choice_0', 'choice_2', 'choice_3'])
+        self.assertEqual(the_html, problem.get_html(), 'should be able to call get_html() twice')
+
+    def test_shuffle_custom_names(self):
+        xml_str = textwrap.dedent("""
+            <problem>
+            <multiplechoiceresponse>
+              <choicegroup type="MultipleChoice" shuffle="true">
+                <choice correct="false" name="aaa">Apple</choice>
+                <choice correct="false">Banana</choice>
+                <choice correct="false">Chocolate</choice>
+                <choice correct ="true" name="ddd">Donut</choice>
+              </choicegroup>
+            </multiplechoiceresponse>
+            </problem>
+        """)
+        problem = new_loncapa_problem(xml_str, seed=0)
+        # B A C D
+        # Check that the custom name= names come through
+        response = problem.responders.values()[0]
+        self.assertTrue(response.has_mask())
+        self.assertTrue(response.has_shuffle())
+        self.assertEqual(response.unmask_order(), ['choice_0', 'choice_aaa', 'choice_1', 'choice_ddd'])
+
+    def test_shuffle_different_seed(self):
+        xml_str = textwrap.dedent("""
+            <problem>
+            <multiplechoiceresponse>
+              <choicegroup type="MultipleChoice" shuffle="true">
+                <choice correct="false">Apple</choice>
+                <choice correct="false">Banana</choice>
+                <choice correct="false">Chocolate</choice>
+                <choice correct ="true">Donut</choice>
+              </choicegroup>
+            </multiplechoiceresponse>
+            </problem>
+        """)
+        problem = new_loncapa_problem(xml_str, seed=341)  # yields D A B C
+        the_html = problem.get_html()
+        self.assertRegexpMatches(the_html, r"<div>.*\[.*'Donut'.*'Apple'.*'Banana'.*'Chocolate'.*\].*</div>")
+
+    def test_shuffle_1_choice(self):
+        xml_str = textwrap.dedent("""
+            <problem>
+            <multiplechoiceresponse>
+              <choicegroup type="MultipleChoice" shuffle="true">
+                <choice correct="true">Apple</choice>
+              </choicegroup>
+            </multiplechoiceresponse>
+            </problem>
+        """)
+        problem = new_loncapa_problem(xml_str, seed=0)
+        the_html = problem.get_html()
+        self.assertRegexpMatches(the_html, r"<div>.*\[.*'Apple'.*\].*</div>")
+        response = problem.responders.values()[0]
+        self.assertTrue(response.has_mask())
+        self.assertTrue(response.has_shuffle())
+        self.assertEqual(response.unmask_order(), ['choice_0'])
+        self.assertEqual(response.unmask_name('mask_0'), 'choice_0')
+
+    def test_shuffle_6_choices(self):
+        xml_str = textwrap.dedent("""
+            <problem>
+            <multiplechoiceresponse>
+              <choicegroup type="MultipleChoice" shuffle="true">
+                <choice correct="false">Apple</choice>
+                <choice correct="false">Banana</choice>
+                <choice correct="false">Chocolate</choice>
+                <choice correct ="true">Zonut</choice>
+                <choice correct ="false">Eggplant</choice>
+                <choice correct ="false">Filet Mignon</choice>
+              </choicegroup>
+            </multiplechoiceresponse>
+            </problem>
+        """)
+        problem = new_loncapa_problem(xml_str, seed=0)  # yields: C E A B D F
+        # Donut -> Zonut to show that there is not some hidden alphabetic ordering going on
+        the_html = problem.get_html()
+        self.assertRegexpMatches(the_html, r"<div>.*\[.*'Chocolate'.*'Eggplant'.*'Apple'.*'Banana'.*'Zonut'.*'Filet Mignon'.*\].*</div>")
+
+    def test_shuffle_false(self):
+        xml_str = textwrap.dedent("""
+            <problem>
+            <multiplechoiceresponse>
+              <choicegroup type="MultipleChoice" shuffle="false">
+                <choice correct="false">Apple</choice>
+                <choice correct="false">Banana</choice>
+                <choice correct="false">Chocolate</choice>
+                <choice correct ="true">Donut</choice>
+              </choicegroup>
+            </multiplechoiceresponse>
+            </problem>
+        """)
+        problem = new_loncapa_problem(xml_str)
+        the_html = problem.get_html()
+        self.assertRegexpMatches(the_html, r"<div>.*\[.*'Apple'.*'Banana'.*'Chocolate'.*'Donut'.*\].*</div>")
+        response = problem.responders.values()[0]
+        self.assertFalse(response.has_mask())
+        self.assertFalse(response.has_shuffle())
+
+    def test_shuffle_fixed_head_end(self):
+        xml_str = textwrap.dedent("""
+            <problem>
+            <multiplechoiceresponse>
+              <choicegroup type="MultipleChoice" shuffle="true">
+                <choice correct="false" fixed="true">Alpha</choice>
+                <choice correct="false" fixed="true">Beta</choice>
+                <choice correct="false">A</choice>
+                <choice correct="false">B</choice>
+                <choice correct="false">C</choice>
+                <choice correct ="true">D</choice>
+              </choicegroup>
+            </multiplechoiceresponse>
+            </problem>
+        """)
+        problem = new_loncapa_problem(xml_str, seed=0)
+        the_html = problem.get_html()
+        # Alpha Beta held back from shuffle (head end)
+        self.assertRegexpMatches(the_html, r"<div>.*\[.*'Alpha'.*'Beta'.*'B'.*'A'.*'C'.*'D'.*\].*</div>")
+
+    def test_shuffle_fixed_tail_end(self):
+        xml_str = textwrap.dedent("""
+            <problem>
+            <multiplechoiceresponse>
+              <choicegroup type="MultipleChoice" shuffle="true">
+                <choice correct="false">A</choice>
+                <choice correct="false">B</choice>
+                <choice correct="false">C</choice>
+                <choice correct ="true">D</choice>
+                <choice correct="false" fixed="true">Alpha</choice>
+                <choice correct="false" fixed="true">Beta</choice>
+              </choicegroup>
+            </multiplechoiceresponse>
+            </problem>
+        """)
+        problem = new_loncapa_problem(xml_str, seed=0)
+        the_html = problem.get_html()
+        # Alpha Beta held back from shuffle (tail end)
+        self.assertRegexpMatches(the_html, r"<div>.*\[.*'B'.*'A'.*'C'.*'D'.*'Alpha'.*'Beta'.*\].*</div>")
+
+    def test_shuffle_fixed_both_ends(self):
+        xml_str = textwrap.dedent("""
+            <problem>
+            <multiplechoiceresponse>
+              <choicegroup type="MultipleChoice" shuffle="true">
+                <choice correct="false" fixed="true">Alpha</choice>
+                <choice correct="false" fixed="true">Beta</choice>
+                <choice correct="false">A</choice>
+                <choice correct="false">B</choice>
+                <choice correct="false">C</choice>
+                <choice correct ="true">D</choice>
+                <choice correct="false" fixed="true">Psi</choice>
+                <choice correct="false" fixed="true">Omega</choice>
+
+              </choicegroup>
+            </multiplechoiceresponse>
+            </problem>
+        """)
+        problem = new_loncapa_problem(xml_str, seed=0)
+        the_html = problem.get_html()
+        self.assertRegexpMatches(the_html, r"<div>.*\[.*'Alpha'.*'Beta'.*'B'.*'A'.*'C'.*'D'.*'Psi'.*'Omega'.*\].*</div>")
+
+    def test_shuffle_fixed_both_ends_thin(self):
+        xml_str = textwrap.dedent("""
+            <problem>
+            <multiplechoiceresponse>
+              <choicegroup type="MultipleChoice" shuffle="true">
+                <choice correct="false" fixed="true">Alpha</choice>
+                <choice correct="false">A</choice>
+                <choice correct="true" fixed="true">Omega</choice>
+              </choicegroup>
+            </multiplechoiceresponse>
+            </problem>
+        """)
+        problem = new_loncapa_problem(xml_str, seed=0)
+        the_html = problem.get_html()
+        self.assertRegexpMatches(the_html, r"<div>.*\[.*'Alpha'.*'A'.*'Omega'.*\].*</div>")
+
+    def test_shuffle_fixed_all(self):
+        xml_str = textwrap.dedent("""
+            <problem>
+            <multiplechoiceresponse>
+              <choicegroup type="MultipleChoice" shuffle="true">
+                <choice correct="false" fixed="true">A</choice>
+                <choice correct="false" fixed="true">B</choice>
+                <choice correct="true" fixed="true">C</choice>
+              </choicegroup>
+            </multiplechoiceresponse>
+            </problem>
+        """)
+        problem = new_loncapa_problem(xml_str, seed=0)
+        the_html = problem.get_html()
+        self.assertRegexpMatches(the_html, r"<div>.*\[.*'A'.*'B'.*'C'.*\].*</div>")
+
+    def test_shuffle_island(self):
+        """A fixed 'island' choice not at the head or tail end gets lumped into the tail end."""
+        xml_str = textwrap.dedent("""
+            <problem>
+            <multiplechoiceresponse>
+              <choicegroup type="MultipleChoice" shuffle="true">
+                <choice correct="false" fixed="true">A</choice>
+                <choice correct="false">Mid</choice>
+                <choice correct="true" fixed="true">C</choice>
+                <choice correct="False">Mid</choice>
+                <choice correct="false" fixed="true">D</choice>
+              </choicegroup>
+            </multiplechoiceresponse>
+            </problem>
+        """)
+        problem = new_loncapa_problem(xml_str, seed=0)
+        the_html = problem.get_html()
+        self.assertRegexpMatches(the_html, r"<div>.*\[.*'A'.*'Mid'.*'Mid'.*'C'.*'D'.*\].*</div>")
+
+    def test_multiple_shuffle_responses(self):
+        xml_str = textwrap.dedent("""
+            <problem>
+            <multiplechoiceresponse>
+              <choicegroup type="MultipleChoice" shuffle="true">
+                <choice correct="false">Apple</choice>
+                <choice correct="false">Banana</choice>
+                <choice correct="false">Chocolate</choice>
+                <choice correct ="true">Donut</choice>
+              </choicegroup>
+            </multiplechoiceresponse>
+            <p>Here is some text</p>
+            <multiplechoiceresponse>
+              <choicegroup type="MultipleChoice" shuffle="true">
+                <choice correct="false">A</choice>
+                <choice correct="false">B</choice>
+                <choice correct="false">C</choice>
+                <choice correct ="true">D</choice>
+              </choicegroup>
+            </multiplechoiceresponse>
+            </problem>
+        """)
+        problem = new_loncapa_problem(xml_str, seed=0)
+        orig_html = problem.get_html()
+        self.assertEqual(orig_html, problem.get_html(), 'should be able to call get_html() twice')
+        html = orig_html.replace('\n', ' ')  # avoid headaches with .* matching
+        # print html  # debug aid -- uncomment to inspect the rendered output
+        self.assertRegexpMatches(html, r"<div>.*\[.*'Banana'.*'Apple'.*'Chocolate'.*'Donut'.*\].*</div>.*" +
+                                       r"<div>.*\[.*'C'.*'A'.*'D'.*'B'.*\].*</div>")
+        # Look at the responses in their authored order
+        responses = sorted(problem.responders.values(), key=lambda resp: int(resp.id[resp.id.rindex('_') + 1:]))
+        self.assertTrue(responses[0].has_mask())
+        self.assertTrue(responses[0].has_shuffle())
+        self.assertTrue(responses[1].has_mask())
+        self.assertTrue(responses[1].has_shuffle())
+        self.assertEqual(responses[0].unmask_order(), ['choice_1', 'choice_0', 'choice_2', 'choice_3'])
+        self.assertEqual(responses[1].unmask_order(), ['choice_2', 'choice_0', 'choice_3', 'choice_1'])
+
+    def test_shuffle_not_with_answerpool(self):
+        """Raise error if shuffle and answer-pool are both used."""
+        xml_str = textwrap.dedent("""
+            <problem>
+            <multiplechoiceresponse>
+              <choicegroup type="MultipleChoice" shuffle="true" answer-pool="4">
+                <choice correct="false" fixed="true">A</choice>
+                <choice correct="false">Mid</choice>
+                <choice correct="true" fixed="true">C</choice>
+                <choice correct="False">Mid</choice>
+                <choice correct="false" fixed="true">D</choice>
+              </choicegroup>
+            </multiplechoiceresponse>
+            </problem>
+        """)
+
+        with self.assertRaisesRegexp(LoncapaProblemError, "shuffle and answer-pool"):
+            new_loncapa_problem(xml_str)
diff --git a/common/lib/capa/capa/tests/test_targeted_feedback.py b/common/lib/capa/capa/tests/test_targeted_feedback.py
new file mode 100644
index 00000000000..6e0df87ff14
--- /dev/null
+++ b/common/lib/capa/capa/tests/test_targeted_feedback.py
@@ -0,0 +1,613 @@
+"""
+Tests the logic of the "targeted-feedback" attribute for MultipleChoice questions,
+i.e. those with the <multiplechoiceresponse> element
+"""
+
+import unittest
+import textwrap
+from . import test_capa_system, new_loncapa_problem
+
+
+class CapaTargetedFeedbackTest(unittest.TestCase):
+    '''
+    Tests for targeted feedback on multiple choice problems: the
+    <targetedfeedbackset>/<targetedfeedback> elements and the targeted-feedback
+    attribute ("" vs. "alwaysShowCorrectChoiceExplanation").
+    '''
+
+    def setUp(self):
+        super(CapaTargetedFeedbackTest, self).setUp()
+        self.system = test_capa_system()
+
+    def test_no_targeted_feedback(self):
+        xml_str = textwrap.dedent("""
+            <problem>
+            <p>What is the correct answer?</p>
+            <multiplechoiceresponse>
+              <choicegroup type="MultipleChoice">
+                <choice correct="false" explanation-id="feedback1">wrong-1</choice>
+                <choice correct="false" explanation-id="feedback2">wrong-2</choice>
+                <choice correct="true" explanation-id="feedbackC">correct-1</choice>
+                <choice correct="false" explanation-id="feedback3">wrong-3</choice>
+              </choicegroup>
+            </multiplechoiceresponse>
+
+            <targetedfeedbackset>
+                <targetedfeedback explanation-id="feedback1">
+                <div class="detailed-targeted-feedback">
+                    <p>Targeted Feedback</p>
+                    <p>This is the 1st WRONG solution</p>
+                </div>
+                </targetedfeedback>
+
+                <targetedfeedback explanation-id="feedback2">
+                <div class="detailed-targeted-feedback">
+                    <p>Targeted Feedback</p>
+                    <p>This is the 2nd WRONG solution</p>
+                </div>
+                </targetedfeedback>
+
+                <targetedfeedback explanation-id="feedback3">
+                <div class="detailed-targeted-feedback">
+                    <p>Targeted Feedback</p>
+                    <p>This is the 3rd WRONG solution</p>
+                </div>
+                </targetedfeedback>
+
+                <targetedfeedback explanation-id="feedbackC">
+                <div class="detailed-targeted-feedback-correct">
+                    <p>Targeted Feedback</p>
+                    <p>Feedback on your correct solution...</p>
+                </div>
+                </targetedfeedback>
+
+            </targetedfeedbackset>
+
+            <solution explanation-id="feedbackC">
+            <div class="detailed-solution">
+                <p>Explanation</p>
+                <p>This is the solution explanation</p>
+                <p>Not much to explain here, sorry!</p>
+            </div>
+            </solution>
+        </problem>
+
+        """)
+
+        problem = new_loncapa_problem(xml_str)
+
+        the_html = problem.get_html()
+        without_new_lines = the_html.replace("\n", "")
+
+        self.assertRegexpMatches(without_new_lines, r"<div>.*'wrong-1'.*'wrong-2'.*'correct-1'.*'wrong-3'.*</div>")
+        self.assertRegexpMatches(without_new_lines, r"feedback1|feedback2|feedback3|feedbackC")
+
+    # A targeted-feedback problem XML, shared by several of the tests below
+    common_targeted_xml = textwrap.dedent("""
+        <problem>
+        <p>What is the correct answer?</p>
+        <multiplechoiceresponse targeted-feedback="">
+          <choicegroup type="MultipleChoice">
+            <choice correct="false" explanation-id="feedback1">wrong-1</choice>
+            <choice correct="false" explanation-id="feedback2">wrong-2</choice>
+            <choice correct="true" explanation-id="feedbackC">correct-1</choice>
+            <choice correct="false" explanation-id="feedback3">wrong-3</choice>
+          </choicegroup>
+        </multiplechoiceresponse>
+
+        <targetedfeedbackset>
+            <targetedfeedback explanation-id="feedback1">
+            <div class="detailed-targeted-feedback">
+                <p>Targeted Feedback</p>
+                <p>This is the 1st WRONG solution</p>
+            </div>
+            </targetedfeedback>
+
+            <targetedfeedback explanation-id="feedback2">
+            <div class="detailed-targeted-feedback">
+                <p>Targeted Feedback</p>
+                <p>This is the 2nd WRONG solution</p>
+            </div>
+            </targetedfeedback>
+
+            <targetedfeedback explanation-id="feedback3">
+            <div class="detailed-targeted-feedback">
+                <p>Targeted Feedback</p>
+                <p>This is the 3rd WRONG solution</p>
+            </div>
+            </targetedfeedback>
+
+            <targetedfeedback explanation-id="feedbackC">
+            <div class="detailed-targeted-feedback-correct">
+                <p>Targeted Feedback</p>
+                <p>Feedback on your correct solution...</p>
+            </div>
+            </targetedfeedback>
+
+        </targetedfeedbackset>
+
+        <solution explanation-id="feedbackC">
+        <div class="detailed-solution">
+            <p>Explanation</p>
+            <p>This is the solution explanation</p>
+            <p>Not much to explain here, sorry!</p>
+        </div>
+        </solution>
+    </problem>
+    """)
+
+    def test_targeted_feedback_not_finished(self):
+        problem = new_loncapa_problem(self.common_targeted_xml)
+        the_html = problem.get_html()
+        without_new_lines = the_html.replace("\n", "")
+
+        self.assertRegexpMatches(without_new_lines, r"<div>.*'wrong-1'.*'wrong-2'.*'correct-1'.*'wrong-3'.*</div>")
+        self.assertNotRegexpMatches(without_new_lines, r"feedback1|feedback2|feedback3|feedbackC")
+        self.assertEquals(the_html, problem.get_html(), "Should be able to call get_html() twice")
+
+    def test_targeted_feedback_student_answer1(self):
+        problem = new_loncapa_problem(self.common_targeted_xml)
+        problem.done = True
+        problem.student_answers = {'1_2_1': 'choice_3'}
+
+        the_html = problem.get_html()
+        without_new_lines = the_html.replace("\n", "")
+
+        self.assertRegexpMatches(without_new_lines, r"<targetedfeedback explanation-id=\"feedback3\">.*3rd WRONG solution")
+        self.assertNotRegexpMatches(without_new_lines, r"feedback1|feedback2|feedbackC")
+        # Check that calling it multiple times yields the same thing
+        the_html2 = problem.get_html()
+        self.assertEquals(the_html, the_html2)
+
+    def test_targeted_feedback_student_answer2(self):
+        problem = new_loncapa_problem(self.common_targeted_xml)
+        problem.done = True
+        problem.student_answers = {'1_2_1': 'choice_0'}
+
+        the_html = problem.get_html()
+        without_new_lines = the_html.replace("\n", "")
+
+        self.assertRegexpMatches(without_new_lines, r"<targetedfeedback explanation-id=\"feedback1\">.*1st WRONG solution")
+        self.assertRegexpMatches(without_new_lines, r"<div>\{.*'1_solution_1'.*\}</div>")
+        self.assertNotRegexpMatches(without_new_lines, r"feedback2|feedback3|feedbackC")
+
+    def test_targeted_feedback_id_typos(self):
+        """Cases where the explanation-id's don't match anything."""
+        xml_str = textwrap.dedent("""
+            <problem>
+            <p>What is the correct answer?</p>
+            <multiplechoiceresponse targeted-feedback="">
+              <choicegroup type="MultipleChoice">
+                <choice correct="false" explanation-id="feedback1TYPO">wrong-1</choice>
+                <choice correct="false" explanation-id="feedback2">wrong-2</choice>
+                <choice correct="true" explanation-id="feedbackCTYPO">correct-1</choice>
+                <choice correct="false" explanation-id="feedback3">wrong-3</choice>
+              </choicegroup>
+            </multiplechoiceresponse>
+
+            <targetedfeedbackset>
+                <targetedfeedback explanation-id="feedback1">
+                <div class="detailed-targeted-feedback">
+                    <p>Targeted Feedback</p>
+                    <p>This is the 1st WRONG solution</p>
+                </div>
+                </targetedfeedback>
+
+                <targetedfeedback explanation-id="feedback2">
+                <div class="detailed-targeted-feedback">
+                    <p>Targeted Feedback</p>
+                    <p>This is the 2nd WRONG solution</p>
+                </div>
+                </targetedfeedback>
+
+                <targetedfeedback explanation-id="feedback3">
+                <div class="detailed-targeted-feedback">
+                    <p>Targeted Feedback</p>
+                    <p>This is the 3rd WRONG solution</p>
+                </div>
+                </targetedfeedback>
+
+                <targetedfeedback explanation-id="feedbackC">
+                <div class="detailed-targeted-feedback-correct">
+                    <p>Targeted Feedback</p>
+                    <p>Feedback on your correct solution...</p>
+                </div>
+                </targetedfeedback>
+
+            </targetedfeedbackset>
+
+            <solution explanation-id="feedbackC">
+            <div class="detailed-solution">
+                <p>Explanation</p>
+                <p>This is the solution explanation</p>
+                <p>Not much to explain here, sorry!</p>
+            </div>
+            </solution>
+        </problem>
+        """)
+
+        # explanation-id does not match anything: fall back to empty targetedfeedbackset
+        problem = new_loncapa_problem(xml_str)
+        problem.done = True
+        problem.student_answers = {'1_2_1': 'choice_0'}
+        the_html = problem.get_html()
+        self.assertRegexpMatches(the_html, r"<targetedfeedbackset>\s*</targetedfeedbackset>")
+
+        # New problem with same XML -- try the correct choice.
+        problem = new_loncapa_problem(xml_str)
+        problem.done = True
+        problem.student_answers = {'1_2_1': 'choice_2'}  # correct
+        the_html = problem.get_html()
+        self.assertRegexpMatches(the_html, r"<targetedfeedbackset>\s*</targetedfeedbackset>")
+
+    def test_targeted_feedback_no_solution_element(self):
+        xml_str = textwrap.dedent("""
+            <problem>
+            <p>What is the correct answer?</p>
+            <multiplechoiceresponse targeted-feedback="">
+              <choicegroup type="MultipleChoice">
+                <choice correct="false">wrong-1</choice>
+                <choice correct="false">wrong-2</choice>
+                <choice correct="true"  explanation-id="feedbackC">correct-1</choice>
+                <choice correct="false">wrong-3</choice>
+              </choicegroup>
+            </multiplechoiceresponse>
+
+            <targetedfeedbackset>
+                <targetedfeedback explanation-id="feedbackC">
+                <div class="detailed-targeted-feedback">
+                    <p>Targeted Feedback</p>
+                </div>
+                </targetedfeedback>
+            </targetedfeedbackset>
+            </problem>
+        """)
+
+        # Solution element not found
+        problem = new_loncapa_problem(xml_str)
+        problem.done = True
+        problem.student_answers = {'1_2_1': 'choice_2'}
+        the_html = problem.get_html()
+        without_new_lines = the_html.replace("\n", "")
+        # </div> right after </targetedfeedbackset>
+        self.assertRegexpMatches(
+            without_new_lines,
+            r"<div>.*<targetedfeedbackset>.*</targetedfeedbackset>\s*</div>"
+        )
+
+    def test_targeted_feedback_show_solution_explanation(self):
+        xml_str = textwrap.dedent("""
+            <problem>
+            <p>What is the correct answer?</p>
+            <multiplechoiceresponse targeted-feedback="alwaysShowCorrectChoiceExplanation">
+              <choicegroup type="MultipleChoice">
+                <choice correct="false" explanation-id="feedback1">wrong-1</choice>
+                <choice correct="false" explanation-id="feedback2">wrong-2</choice>
+                <choice correct="true" explanation-id="feedbackC">correct-1</choice>
+                <choice correct="false" explanation-id="feedback3">wrong-3</choice>
+              </choicegroup>
+            </multiplechoiceresponse>
+
+            <targetedfeedbackset>
+                <targetedfeedback explanation-id="feedback1">
+                <div class="detailed-targeted-feedback">
+                    <p>Targeted Feedback</p>
+                    <p>This is the 1st WRONG solution</p>
+                </div>
+                </targetedfeedback>
+
+                <targetedfeedback explanation-id="feedback2">
+                <div class="detailed-targeted-feedback">
+                    <p>Targeted Feedback</p>
+                    <p>This is the 2nd WRONG solution</p>
+                </div>
+                </targetedfeedback>
+
+                <targetedfeedback explanation-id="feedback3">
+                <div class="detailed-targeted-feedback">
+                    <p>Targeted Feedback</p>
+                    <p>This is the 3rd WRONG solution</p>
+                </div>
+                </targetedfeedback>
+
+                <targetedfeedback explanation-id="feedbackC">
+                <div class="detailed-targeted-feedback-correct">
+                    <p>Targeted Feedback</p>
+                    <p>Feedback on your correct solution...</p>
+                </div>
+                </targetedfeedback>
+
+            </targetedfeedbackset>
+
+            <solution explanation-id="feedbackC">
+            <div class="detailed-solution">
+                <p>Explanation</p>
+                <p>This is the solution explanation</p>
+                <p>Not much to explain here, sorry!</p>
+            </div>
+            </solution>
+        </problem>
+
+        """)
+
+        problem = new_loncapa_problem(xml_str)
+        problem.done = True
+        problem.student_answers = {'1_2_1': 'choice_0'}
+
+        the_html = problem.get_html()
+        without_new_lines = the_html.replace("\n", "")
+
+        self.assertRegexpMatches(without_new_lines, r"<targetedfeedback explanation-id=\"feedback1\">.*1st WRONG solution")
+        self.assertRegexpMatches(without_new_lines, r"<targetedfeedback explanation-id=\"feedbackC\".*solution explanation")
+        self.assertNotRegexpMatches(without_new_lines, r"<div>\{.*'1_solution_1'.*\}</div>")
+        self.assertNotRegexpMatches(without_new_lines, r"feedback2|feedback3")
+        # Check that calling it multiple times yields the same thing
+        the_html2 = problem.get_html()
+        self.assertEquals(the_html, the_html2)
+
+    def test_targeted_feedback_no_show_solution_explanation(self):
+        xml_str = textwrap.dedent("""
+            <problem>
+            <p>What is the correct answer?</p>
+            <multiplechoiceresponse targeted-feedback="">
+              <choicegroup type="MultipleChoice">
+                <choice correct="false" explanation-id="feedback1">wrong-1</choice>
+                <choice correct="false" explanation-id="feedback2">wrong-2</choice>
+                <choice correct="true" explanation-id="feedbackC">correct-1</choice>
+                <choice correct="false" explanation-id="feedback3">wrong-3</choice>
+              </choicegroup>
+            </multiplechoiceresponse>
+
+            <targetedfeedbackset>
+                <targetedfeedback explanation-id="feedback1">
+                <div class="detailed-targeted-feedback">
+                    <p>Targeted Feedback</p>
+                    <p>This is the 1st WRONG solution</p>
+                </div>
+                </targetedfeedback>
+
+                <targetedfeedback explanation-id="feedback2">
+                <div class="detailed-targeted-feedback">
+                    <p>Targeted Feedback</p>
+                    <p>This is the 2nd WRONG solution</p>
+                </div>
+                </targetedfeedback>
+
+                <targetedfeedback explanation-id="feedback3">
+                <div class="detailed-targeted-feedback">
+                    <p>Targeted Feedback</p>
+                    <p>This is the 3rd WRONG solution</p>
+                </div>
+                </targetedfeedback>
+
+                <targetedfeedback explanation-id="feedbackC">
+                <div class="detailed-targeted-feedback-correct">
+                    <p>Targeted Feedback</p>
+                    <p>Feedback on your correct solution...</p>
+                </div>
+                </targetedfeedback>
+
+            </targetedfeedbackset>
+
+            <solution explanation-id="feedbackC">
+            <div class="detailed-solution">
+                <p>Explanation</p>
+                <p>This is the solution explanation</p>
+                <p>Not much to explain here, sorry!</p>
+            </div>
+            </solution>
+        </problem>
+
+        """)
+
+        problem = new_loncapa_problem(xml_str)
+        problem.done = True
+        problem.student_answers = {'1_2_1': 'choice_0'}
+
+        the_html = problem.get_html()
+        without_new_lines = the_html.replace("\n", "")
+
+        self.assertRegexpMatches(without_new_lines, r"<targetedfeedback explanation-id=\"feedback1\">.*1st WRONG solution")
+        self.assertNotRegexpMatches(without_new_lines, r"<targetedfeedback explanation-id=\"feedbackC\".*solution explanation")
+        self.assertRegexpMatches(without_new_lines, r"<div>\{.*'1_solution_1'.*\}</div>")
+        self.assertNotRegexpMatches(without_new_lines, r"feedback2|feedback3|feedbackC")
+
+    def test_targeted_feedback_with_solutionset_explanation(self):
+        xml_str = textwrap.dedent("""
+            <problem>
+            <p>What is the correct answer?</p>
+            <multiplechoiceresponse targeted-feedback="alwaysShowCorrectChoiceExplanation">
+              <choicegroup type="MultipleChoice">
+                <choice correct="false" explanation-id="feedback1">wrong-1</choice>
+                <choice correct="false" explanation-id="feedback2">wrong-2</choice>
+                <choice correct="true" explanation-id="feedbackC">correct-1</choice>
+                <choice correct="false" explanation-id="feedback3">wrong-3</choice>
+                <choice correct="true" explanation-id="feedbackC2">correct-2</choice>
+              </choicegroup>
+            </multiplechoiceresponse>
+
+            <targetedfeedbackset>
+                <targetedfeedback explanation-id="feedback1">
+                <div class="detailed-targeted-feedback">
+                    <p>Targeted Feedback</p>
+                    <p>This is the 1st WRONG solution</p>
+                </div>
+                </targetedfeedback>
+
+                <targetedfeedback explanation-id="feedback2">
+                <div class="detailed-targeted-feedback">
+                    <p>Targeted Feedback</p>
+                    <p>This is the 2nd WRONG solution</p>
+                </div>
+                </targetedfeedback>
+
+                <targetedfeedback explanation-id="feedback3">
+                <div class="detailed-targeted-feedback">
+                    <p>Targeted Feedback</p>
+                    <p>This is the 3rd WRONG solution</p>
+                </div>
+                </targetedfeedback>
+
+                <targetedfeedback explanation-id="feedbackC">
+                <div class="detailed-targeted-feedback-correct">
+                    <p>Targeted Feedback</p>
+                    <p>Feedback on your correct solution...</p>
+                </div>
+                </targetedfeedback>
+
+                <targetedfeedback explanation-id="feedbackC2">
+                <div class="detailed-targeted-feedback-correct">
+                    <p>Targeted Feedback</p>
+                    <p>Feedback on the other solution...</p>
+                </div>
+                </targetedfeedback>
+
+            </targetedfeedbackset>
+
+            <solutionset>
+                <solution explanation-id="feedbackC2">
+                <div class="detailed-solution">
+                    <p>Explanation</p>
+                    <p>This is the other solution explanation</p>
+                    <p>Not much to explain here, sorry!</p>
+                </div>
+                </solution>
+            </solutionset>
+        </problem>
+
+        """)
+
+        problem = new_loncapa_problem(xml_str)
+        problem.done = True
+        problem.student_answers = {'1_2_1': 'choice_0'}
+
+        the_html = problem.get_html()
+        without_new_lines = the_html.replace("\n", "")
+
+        self.assertRegexpMatches(without_new_lines, r"<targetedfeedback explanation-id=\"feedback1\">.*1st WRONG solution")
+        self.assertRegexpMatches(without_new_lines, r"<targetedfeedback explanation-id=\"feedbackC2\".*other solution explanation")
+        self.assertNotRegexpMatches(without_new_lines, r"<div>\{.*'1_solution_1'.*\}</div>")
+        self.assertNotRegexpMatches(without_new_lines, r"feedback2|feedback3")
+
+    def test_targeted_feedback_no_feedback_for_selected_choice1(self):
+        xml_str = textwrap.dedent("""
+            <problem>
+            <p>What is the correct answer?</p>
+            <multiplechoiceresponse targeted-feedback="alwaysShowCorrectChoiceExplanation">
+              <choicegroup type="MultipleChoice">
+                <choice correct="false" explanation-id="feedback1">wrong-1</choice>
+                <choice correct="false" explanation-id="feedback2">wrong-2</choice>
+                <choice correct="true" explanation-id="feedbackC">correct-1</choice>
+                <choice correct="false" explanation-id="feedback3">wrong-3</choice>
+              </choicegroup>
+            </multiplechoiceresponse>
+
+            <targetedfeedbackset>
+                <targetedfeedback explanation-id="feedback1">
+                <div class="detailed-targeted-feedback">
+                    <p>Targeted Feedback</p>
+                    <p>This is the 1st WRONG solution</p>
+                </div>
+                </targetedfeedback>
+
+                <targetedfeedback explanation-id="feedback3">
+                <div class="detailed-targeted-feedback">
+                    <p>Targeted Feedback</p>
+                    <p>This is the 3rd WRONG solution</p>
+                </div>
+                </targetedfeedback>
+
+                <targetedfeedback explanation-id="feedbackC">
+                <div class="detailed-targeted-feedback-correct">
+                    <p>Targeted Feedback</p>
+                    <p>Feedback on your correct solution...</p>
+                </div>
+                </targetedfeedback>
+
+            </targetedfeedbackset>
+
+            <solutionset>
+                <solution explanation-id="feedbackC">
+                <div class="detailed-solution">
+                    <p>Explanation</p>
+                    <p>This is the solution explanation</p>
+                    <p>Not much to explain here, sorry!</p>
+                </div>
+                </solution>
+            </solutionset>
+        </problem>
+
+        """)
+
+        # The student chooses one with no feedback, but alwaysShowCorrectChoiceExplanation
+        # is in force, so we should see the correct solution feedback.
+        problem = new_loncapa_problem(xml_str)
+        problem.done = True
+        problem.student_answers = {'1_2_1': 'choice_1'}
+
+        the_html = problem.get_html()
+        without_new_lines = the_html.replace("\n", "")
+
+        self.assertRegexpMatches(without_new_lines, r"<targetedfeedback explanation-id=\"feedbackC\".*solution explanation")
+        self.assertNotRegexpMatches(without_new_lines, r"<div>\{.*'1_solution_1'.*\}</div>")
+        self.assertNotRegexpMatches(without_new_lines, r"feedback1|feedback3")
+
+    def test_targeted_feedback_no_feedback_for_selected_choice2(self):
+        xml_str = textwrap.dedent("""
+            <problem>
+            <p>What is the correct answer?</p>
+            <multiplechoiceresponse targeted-feedback="">
+              <choicegroup type="MultipleChoice">
+                <choice correct="false" explanation-id="feedback1">wrong-1</choice>
+                <choice correct="false" explanation-id="feedback2">wrong-2</choice>
+                <choice correct="true" explanation-id="feedbackC">correct-1</choice>
+                <choice correct="false" explanation-id="feedback3">wrong-3</choice>
+              </choicegroup>
+            </multiplechoiceresponse>
+
+            <targetedfeedbackset>
+                <targetedfeedback explanation-id="feedback1">
+                <div class="detailed-targeted-feedback">
+                    <p>Targeted Feedback</p>
+                    <p>This is the 1st WRONG solution</p>
+                </div>
+                </targetedfeedback>
+
+                <targetedfeedback explanation-id="feedback3">
+                <div class="detailed-targeted-feedback">
+                    <p>Targeted Feedback</p>
+                    <p>This is the 3rd WRONG solution</p>
+                </div>
+                </targetedfeedback>
+
+                <targetedfeedback explanation-id="feedbackC">
+                <div class="detailed-targeted-feedback-correct">
+                    <p>Targeted Feedback</p>
+                    <p>Feedback on your correct solution...</p>
+                </div>
+                </targetedfeedback>
+
+            </targetedfeedbackset>
+
+            <solutionset>
+                <solution explanation-id="feedbackC">
+                <div class="detailed-solution">
+                    <p>Explanation</p>
+                    <p>This is the solution explanation</p>
+                    <p>Not much to explain here, sorry!</p>
+                </div>
+                </solution>
+            </solutionset>
+        </problem>
+
+        """)
+
+        # The student chooses one with no feedback set, so we check that there's no feedback.
+        problem = new_loncapa_problem(xml_str)
+        problem.done = True
+        problem.student_answers = {'1_2_1': 'choice_1'}
+
+        the_html = problem.get_html()
+        without_new_lines = the_html.replace("\n", "")
+
+        self.assertNotRegexpMatches(without_new_lines, r"<targetedfeedback explanation-id=\"feedbackC\".*solution explanation")
+        self.assertRegexpMatches(without_new_lines, r"<div>\{.*'1_solution_1'.*\}</div>")
+        self.assertNotRegexpMatches(without_new_lines, r"feedback1|feedback3|feedbackC")
diff --git a/common/lib/xmodule/xmodule/capa_base.py b/common/lib/xmodule/xmodule/capa_base.py
index 05d2b013863..4a4b498253f 100644
--- a/common/lib/xmodule/xmodule/capa_base.py
+++ b/common/lib/xmodule/xmodule/capa_base.py
@@ -1,5 +1,6 @@
 """Implements basics of Capa, including class CapaModule."""
 import cgi
+import copy
 import datetime
 import hashlib
 import json
@@ -154,6 +155,12 @@ class CapaFields(object):
     student_answers = Dict(help="Dictionary with the current student responses", scope=Scope.user_state)
     done = Boolean(help="Whether the student has answered the problem", scope=Scope.user_state)
     seed = Integer(help="Random seed for this student", scope=Scope.user_state)
+    last_submission_time = Date(help="Last submission time", scope=Scope.user_state)
+    submission_wait_seconds = Integer(
+        display_name="Timer Between Attempts",
+        help="Seconds a student must wait between submissions for a problem with multiple attempts.",
+        scope=Scope.settings,
+        default=0)
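+    # A value of 0 (the default) disables the submission wait-time check in check_problem().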
     weight = Float(
         display_name="Problem Weight",
         help=("Defines the number of points each problem is worth. "
@@ -313,6 +320,12 @@ class CapaMixin(CapaFields):
         self.student_answers = lcp_state['student_answers']
         self.seed = lcp_state['seed']
 
+    def set_last_submission_time(self):
+        """
+        Set the module's last submission time (when the problem was checked)
+        """
+        self.last_submission_time = datetime.datetime.now(UTC())
+
     def get_score(self):
         """
         Access the problem's score
@@ -751,7 +764,7 @@ class CapaMixin(CapaFields):
         """
         event_info = dict()
         event_info['problem_id'] = self.location.url()
-        self.runtime.track_function('showanswer', event_info)
+        self.track_function_unmask('showanswer', event_info)
         if not self.answer_available():
             raise NotFoundError('Answer is not available')
         else:
@@ -882,7 +895,8 @@ class CapaMixin(CapaFields):
 
         return {'grade': score['score'], 'max_grade': score['total']}
 
-    def check_problem(self, data):
+    # pylint: disable=too-many-statements
+    def check_problem(self, data, override_time=False):
         """
         Checks whether answers to a problem are correct
 
@@ -899,13 +913,17 @@ class CapaMixin(CapaFields):
         event_info['answers'] = answers_without_files
 
         metric_name = u'capa.check_problem.{}'.format
+        # Use override_time, if supplied, instead of the real current time
+        current_time = datetime.datetime.now(UTC())
+        if override_time is not False:
+            current_time = override_time
 
         _ = self.runtime.service(self, "i18n").ugettext
 
         # Too late. Cannot submit
         if self.closed():
             event_info['failure'] = 'closed'
-            self.runtime.track_function('problem_check_fail', event_info)
+            self.track_function_unmask('problem_check_fail', event_info)
             if dog_stats_api:
                 dog_stats_api.increment(metric_name('checks'), [u'result:failed', u'failure:closed'])
             raise NotFoundError(_("Problem is closed."))
@@ -913,26 +931,42 @@ class CapaMixin(CapaFields):
         # Problem submitted. Student should reset before checking again
         if self.done and self.rerandomize == "always":
             event_info['failure'] = 'unreset'
-            self.runtime.track_function('problem_check_fail', event_info)
+            self.track_function_unmask('problem_check_fail', event_info)
             if dog_stats_api:
                 dog_stats_api.increment(metric_name('checks'), [u'result:failed', u'failure:unreset'])
             raise NotFoundError(_("Problem must be reset before it can be checked again."))
 
         # Problem queued. Students must wait a specified waittime before they are allowed to submit
+        # IDEA: consider reusing the code below: pretty-printing of seconds, display of the time remaining
         if self.lcp.is_queued():
-            current_time = datetime.datetime.now(UTC())
             prev_submit_time = self.lcp.get_recentmost_queuetime()
+
             waittime_between_requests = self.runtime.xqueue['waittime']
             if (current_time - prev_submit_time).total_seconds() < waittime_between_requests:
                 msg = _(u"You must wait at least {wait} seconds between submissions.").format(
                     wait=waittime_between_requests)
-                return {'success': msg, 'html': ''}  # Prompts a modal dialog in ajax callback
+                return {'success': msg, 'html': ''}
+
+        # Wait time between submissions: check whether it is too soon to submit again.
+        if self.last_submission_time is not None and self.submission_wait_seconds != 0:
+            # pylint: disable=maybe-no-member
+            # pylint is unable to verify that .total_seconds() exists
+            if (current_time - self.last_submission_time).total_seconds() < self.submission_wait_seconds:
+                remaining_secs = int(self.submission_wait_seconds - (current_time - self.last_submission_time).total_seconds())
+                msg = _(u'You must wait at least {wait_secs} between submissions. {remaining_secs} remaining.').format(
+                    wait_secs=self.pretty_print_seconds(self.submission_wait_seconds),
+                    remaining_secs=self.pretty_print_seconds(remaining_secs))
+                return {
+                    'success': msg,
+                    'html': ''
+                }
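+        # Worked example (illustrative values): with submission_wait_seconds=180 and
+        # a submission made 60 seconds ago, remaining_secs is 120 and the message reads
+        # "You must wait at least 3 minutes between submissions. 2 minutes remaining."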
 
         try:
             correct_map = self.lcp.grade_answers(answers)
             self.attempts = self.attempts + 1
             self.lcp.done = True
             self.set_state_from_lcp()
+            self.set_last_submission_time()
 
         except (StudentInputError, ResponseError, LoncapaProblemError) as inst:
             log.warning("StudentInputError in capa_module:problem_check",
@@ -981,7 +1015,7 @@ class CapaMixin(CapaFields):
         event_info['success'] = success
         event_info['attempts'] = self.attempts
         event_info['submission'] = self.get_submission_metadata_safe(answers_without_files, correct_map)
-        self.runtime.track_function('problem_check', event_info)
+        self.track_function_unmask('problem_check', event_info)
 
         if dog_stats_api:
             dog_stats_api.increment(metric_name('checks'), [u'result:success'])
@@ -1002,8 +1036,87 @@ class CapaMixin(CapaFields):
 
         return {
             'success': success,
-            'contents': html,
+            'contents': html
         }
+    # pylint: enable=too-many-statements
+
+    def track_function_unmask(self, title, event_info):
+        """
+        All calls to runtime.track_function route through here so that the
+        choice names can be unmasked.
+        """
+        # Do the unmask translation on a copy of event_info so that the
+        # same event_info dict is never unmasked twice.
+        event_unmasked = copy.deepcopy(event_info)
+        self.unmask_event(event_unmasked)
+        self.runtime.track_function(title, event_unmasked)
+
+    def unmask_event(self, event_info):
+        """
+        Translates in-place the event_info to account for masking
+        and adds information about permutation options in force.
+        """
+        # answers is like: {u'i4x-Stanford-CS99-problem-dada976e76f34c24bc8415039dee1300_2_1': u'mask_0'}
+        # Each response has an answer_id which matches a key in answers.
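+        # A hypothetical before/after sketch (ids and ordering are illustrative only):
+        #   before: {'answers': {'..._2_1': u'mask_2'}}
+        #   after:  {'answers': {'..._2_1': u'choice_0'},
+        #            'permutation': {'..._2_1': ('shuffle',
+        #                             [u'choice_1', u'choice_0', u'choice_2', u'choice_3'])}}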
+        for response in self.lcp.responders.values():
+            # Un-mask choice names in event_info for masked responses.
+            if response.has_mask():
+                # We don't assume much about the structure of event_info,
+                # but check for the existence of the things we need to un-mask.
+
+                # Look for answers/id
+                answer = event_info.get('answers', {}).get(response.answer_id)
+                if answer is not None:
+                    event_info['answers'][response.answer_id] = response.unmask_name(answer)
+
+                # Look for state/student_answers/id
+                answer = event_info.get('state', {}).get('student_answers', {}).get(response.answer_id)
+                if answer is not None:
+                    event_info['state']['student_answers'][response.answer_id] = response.unmask_name(answer)
+
+                # Look for old_state/student_answers/id  -- parallel to the above case, happens on reset
+                answer = event_info.get('old_state', {}).get('student_answers', {}).get(response.answer_id)
+                if answer is not None:
+                    event_info['old_state']['student_answers'][response.answer_id] = response.unmask_name(answer)
+
+            # Add 'permutation' to event_info for permuted responses.
+            permutation_option = None
+            if response.has_shuffle():
+                permutation_option = 'shuffle'
+            elif response.has_answerpool():
+                permutation_option = 'answerpool'
+
+            if permutation_option is not None:
+                # Add permutation record tuple: (one of:'shuffle'/'answerpool', [as-displayed list])
+                if 'permutation' not in event_info:
+                    event_info['permutation'] = {}
+                event_info['permutation'][response.answer_id] = (permutation_option, response.unmask_order())
+
+    def pretty_print_seconds(self, num_seconds):
+        """
+        Returns a time duration, nicely formatted, e.g. "3 minutes 4 seconds"
+        """
+        # Here _ is ungettext, the plural-aware variant of gettext; it takes a 3-argument call
+        _ = self.runtime.service(self, "i18n").ungettext
+        hours = num_seconds // 3600
+        sub_hour = num_seconds % 3600
+        minutes = sub_hour // 60
+        seconds = sub_hour % 60
+        display = ""
+        if hours > 0:
+            display += _("{num_hour} hour", "{num_hour} hours", hours).format(num_hour=hours)
+        if minutes > 0:
+            if display != "":
+                display += " "
+            # translators: "minute" refers to a minute of time
+            display += _("{num_minute} minute", "{num_minute} minutes", minutes).format(num_minute=minutes)
+        # Taking care to make "0 seconds" instead of "" for 0 time
+        if seconds > 0 or (hours == 0 and minutes == 0):
+            if display != "":
+                display += " "
+            # translators: "second" refers to a second of time
+            display += _("{num_second} second", "{num_second} seconds", seconds).format(num_second=seconds)
+        return display
 
     def get_submission_metadata_safe(self, answers, correct_map):
         """
@@ -1111,13 +1224,13 @@ class CapaMixin(CapaFields):
 
         if not self.lcp.supports_rescoring():
             event_info['failure'] = 'unsupported'
-            self.runtime.track_function('problem_rescore_fail', event_info)
+            self.track_function_unmask('problem_rescore_fail', event_info)
             # Translators: 'rescoring' refers to the act of re-submitting a student's solution so it can get a new score.
             raise NotImplementedError(_("Problem's definition does not support rescoring."))
 
         if not self.done:
             event_info['failure'] = 'unanswered'
-            self.runtime.track_function('problem_rescore_fail', event_info)
+            self.track_function_unmask('problem_rescore_fail', event_info)
             raise NotFoundError(_("Problem must be answered before it can be graded again."))
 
         # get old score, for comparison:
@@ -1131,12 +1244,12 @@ class CapaMixin(CapaFields):
         except (StudentInputError, ResponseError, LoncapaProblemError) as inst:
             log.warning("Input error in capa_module:problem_rescore", exc_info=True)
             event_info['failure'] = 'input_error'
-            self.runtime.track_function('problem_rescore_fail', event_info)
+            self.track_function_unmask('problem_rescore_fail', event_info)
             return {'success': u"Error: {0}".format(inst.message)}
 
         except Exception as err:
             event_info['failure'] = 'unexpected'
-            self.runtime.track_function('problem_rescore_fail', event_info)
+            self.track_function_unmask('problem_rescore_fail', event_info)
             if self.runtime.DEBUG:
                 msg = u"Error checking problem: {0}".format(err.message)
                 msg += u'\nTraceback:\n' + traceback.format_exc()
@@ -1164,7 +1277,7 @@ class CapaMixin(CapaFields):
         event_info['correct_map'] = correct_map.get_dict()
         event_info['success'] = success
         event_info['attempts'] = self.attempts
-        self.runtime.track_function('problem_rescore', event_info)
+        self.track_function_unmask('problem_rescore', event_info)
 
         # psychometrics should be called on rescoring requests in the same way as check-problem
         if hasattr(self.runtime, 'psychometrics_handler'):  # update PsychometricsData using callback
@@ -1189,7 +1302,7 @@ class CapaMixin(CapaFields):
         # Too late. Cannot submit
         if self.closed() and not self.max_attempts == 0:
             event_info['failure'] = 'closed'
-            self.runtime.track_function('save_problem_fail', event_info)
+            self.track_function_unmask('save_problem_fail', event_info)
             return {
                 'success': False,
                 # Translators: 'closed' means the problem's due date has passed. You may no longer attempt to solve the problem.
@@ -1200,7 +1313,7 @@ class CapaMixin(CapaFields):
         # again.
         if self.done and self.rerandomize == "always":
             event_info['failure'] = 'done'
-            self.runtime.track_function('save_problem_fail', event_info)
+            self.track_function_unmask('save_problem_fail', event_info)
             return {
                 'success': False,
                 'msg': _("Problem needs to be reset prior to save.")
@@ -1210,7 +1323,7 @@ class CapaMixin(CapaFields):
 
         self.set_state_from_lcp()
 
-        self.runtime.track_function('save_problem_success', event_info)
+        self.track_function_unmask('save_problem_success', event_info)
         msg = _("Your answers have been saved.")
         if not self.max_attempts == 0:
             msg = _("Your answers have been saved but not graded. Click 'Check' to grade them.")
@@ -1238,7 +1351,7 @@ class CapaMixin(CapaFields):
 
         if self.closed():
             event_info['failure'] = 'closed'
-            self.runtime.track_function('reset_problem_fail', event_info)
+            self.track_function_unmask('reset_problem_fail', event_info)
             return {
                 'success': False,
                 # Translators: 'closed' means the problem's due date has passed. You may no longer attempt to solve the problem.
@@ -1247,7 +1360,7 @@ class CapaMixin(CapaFields):
 
         if not self.done:
             event_info['failure'] = 'not_done'
-            self.runtime.track_function('reset_problem_fail', event_info)
+            self.track_function_unmask('reset_problem_fail', event_info)
             return {
                 'success': False,
                 'error': _("Refresh the page and make an attempt before resetting."),
@@ -1264,7 +1377,7 @@ class CapaMixin(CapaFields):
         self.set_state_from_lcp()
 
         event_info['new_state'] = self.lcp.get_state()
-        self.runtime.track_function('reset_problem', event_info)
+        self.track_function_unmask('reset_problem', event_info)
 
         return {
             'success': True,
diff --git a/common/lib/xmodule/xmodule/css/capa/display.scss b/common/lib/xmodule/xmodule/css/capa/display.scss
index 5524e84e4ca..a4dbf8698bd 100644
--- a/common/lib/xmodule/xmodule/css/capa/display.scss
+++ b/common/lib/xmodule/xmodule/css/capa/display.scss
@@ -126,6 +126,23 @@ div.problem {
     }
   }
 
+  .targeted-feedback-span {
+    > span {
+      margin: $baseline 0;
+      display: block;
+      border: 1px solid #000;
+      padding: 9px 15px $baseline;
+      background: #fff;
+      position: relative;
+      box-shadow: inset 0 0 0 1px #eee;
+      border-radius: 3px;
+
+      &:empty {
+        display: none;
+      }
+    }
+  }
+
   div {
     p {
       &.answer {
@@ -628,6 +645,34 @@ div.problem {
     }
   }
 
+  .detailed-targeted-feedback {
+    > p:first-child {
+      color: red;
+      text-transform: uppercase;
+      font-weight: bold;
+      font-style: normal;
+      font-size: 0.9em;
+    }
+
+    p:last-child {
+      margin-bottom: 0;
+    }
+  }
+
+  .detailed-targeted-feedback-correct {
+    > p:first-child {
+      color: green;
+      text-transform: uppercase;
+      font-weight: bold;
+      font-style: normal;
+      font-size: 0.9em;
+    }
+
+    p:last-child {
+      margin-bottom: 0;
+    }
+  }
+
   div.capa_alert {
     margin-top: $baseline;
     padding: 8px 12px;
diff --git a/common/lib/xmodule/xmodule/js/spec/problem/edit_spec.coffee b/common/lib/xmodule/xmodule/js/spec/problem/edit_spec.coffee
index 46cccefa9ae..f4401a8b77a 100644
--- a/common/lib/xmodule/xmodule/js/spec/problem/edit_spec.coffee
+++ b/common/lib/xmodule/xmodule/js/spec/problem/edit_spec.coffee
@@ -244,6 +244,105 @@ describe 'MarkdownEditingDescriptor', ->
         </div>
         </solution>
         </problem>""")
+    it 'converts multiple choice shuffle to xml', ->
+      data = MarkdownEditingDescriptor.markdownToXml("""A multiple choice problem presents radio buttons for student input. Students can only select a single option presented. Multiple Choice questions have been the subject of many areas of research due to the early invention and adoption of bubble sheets.
+        
+        One of the main elements that goes into a good multiple choice question is the existence of good distractors. That is, each of the alternate responses presented to the student should be the result of a plausible mistake that a student might make.
+        
+        What Apple device competed with the portable CD player?
+        (!x@) The iPad
+        (@) Napster
+        () The iPod
+        ( ) The vegetable peeler
+        ( ) Android
+        (@) The Beatles
+        
+        [Explanation]
+        The release of the iPod allowed consumers to carry their entire music library with them in a format that did not rely on fragile and energy-intensive spinning disks.
+        [Explanation]
+        """)
+      expect(data).toEqual("""<problem>
+        <p>A multiple choice problem presents radio buttons for student input. Students can only select a single option presented. Multiple Choice questions have been the subject of many areas of research due to the early invention and adoption of bubble sheets.</p>
+        
+        <p>One of the main elements that goes into a good multiple choice question is the existence of good distractors. That is, each of the alternate responses presented to the student should be the result of a plausible mistake that a student might make.</p>
+        
+        <p>What Apple device competed with the portable CD player?</p>
+        <multiplechoiceresponse>
+          <choicegroup type="MultipleChoice" shuffle="true">
+            <choice correct="true" fixed="true">The iPad</choice>
+            <choice correct="false" fixed="true">Napster</choice>
+            <choice correct="false">The iPod</choice>
+            <choice correct="false">The vegetable peeler</choice>
+            <choice correct="false">Android</choice>
+            <choice correct="false" fixed="true">The Beatles</choice>
+          </choicegroup>
+        </multiplechoiceresponse>
+        
+        <solution>
+        <div class="detailed-solution">
+        <p>Explanation</p>
+        
+        <p>The release of the iPod allowed consumers to carry their entire music library with them in a format that did not rely on fragile and energy-intensive spinning disks.</p>
+
+        </div>
+        </solution>
+        </problem>""")
+
+    it 'converts a series of multiplechoice to xml', ->
+      data = MarkdownEditingDescriptor.markdownToXml("""bleh
+        (!x) a
+        () b
+        () c
+        yatta
+        ( ) x
+        ( ) y
+        (x) z
+        testa
+        (!) i
+        ( ) ii
+        (x) iii
+        [Explanation]
+        When the student is ready, the explanation appears.
+        [Explanation]
+        """)
+      expect(data).toEqual("""<problem>
+        <p>bleh</p>
+        <multiplechoiceresponse>
+          <choicegroup type="MultipleChoice" shuffle="true">
+            <choice correct="true">a</choice>
+            <choice correct="false">b</choice>
+            <choice correct="false">c</choice>
+          </choicegroup>
+        </multiplechoiceresponse>
+        
+        <p>yatta</p>
+        <multiplechoiceresponse>
+          <choicegroup type="MultipleChoice">
+            <choice correct="false">x</choice>
+            <choice correct="false">y</choice>
+            <choice correct="true">z</choice>
+          </choicegroup>
+        </multiplechoiceresponse>
+        
+        <p>testa</p>
+        <multiplechoiceresponse>
+          <choicegroup type="MultipleChoice" shuffle="true">
+            <choice correct="false">i</choice>
+            <choice correct="false">ii</choice>
+            <choice correct="true">iii</choice>
+          </choicegroup>
+        </multiplechoiceresponse>
+        
+        <solution>
+        <div class="detailed-solution">
+        <p>Explanation</p>
+        
+        <p>When the student is ready, the explanation appears.</p>
+        
+        </div>
+        </solution>
+        </problem>""")
+
     it 'converts OptionResponse to xml', ->
       data = MarkdownEditingDescriptor.markdownToXml("""OptionResponse gives a limited set of options for students to respond with, and presents those options in a format that encourages them to search for a specific answer rather than being immediately presented with options from which to recognize the correct answer.
 
diff --git a/common/lib/xmodule/xmodule/js/src/problem/edit.coffee b/common/lib/xmodule/xmodule/js/src/problem/edit.coffee
index 35b18e9b15c..9f3c9de9723 100644
--- a/common/lib/xmodule/xmodule/js/src/problem/edit.coffee
+++ b/common/lib/xmodule/xmodule/js/src/problem/edit.coffee
@@ -195,25 +195,35 @@ class @MarkdownEditingDescriptor extends XModule.Descriptor
       xml = xml.replace(/\n^\=\=+$/gm, '');
 
       // group multiple choice answers
-      xml = xml.replace(/(^\s*\(.?\).*?$\n*)+/gm, function (match) {
-          var groupString = '<multiplechoiceresponse>\n',
-              value, correct, options;
-
-          groupString += '  <choicegroup type="MultipleChoice">\n';
-          options = match.split('\n');
-
-          for (i = 0; i < options.length; i += 1) {
-              if(options[i].length > 0) {
-                  value = options[i].split(/^\s*\(.?\)\s*/)[1];
-                  correct = /^\s*\(x\)/i.test(options[i]);
-                  groupString += '    <choice correct="' + correct + '">' + value + '</choice>\n';
-              }
+      xml = xml.replace(/(^\s*\(.{0,3}\).*?$\n*)+/gm, function(match, p) {
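+        // Up to three marker characters may appear inside the parens:
+        // "x" marks the correct choice, "@" pins a choice (fixed="true"),
+        // and "!" on any choice enables shuffle="true" for the whole group.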
+        var choices = '';
+        var shuffle = false;
+        var options = match.split('\n');
+        for(var i = 0; i < options.length; i++) {
+          if(options[i].length > 0) {
+            var value = options[i].split(/^\s*\(.{0,3}\)\s*/)[1];
+            var inparens = /^\s*\((.{0,3})\)\s*/.exec(options[i])[1];
+            var correct = /x/i.test(inparens);
+            var fixed = '';
+            if(/@/.test(inparens)) {
+              fixed = ' fixed="true"';
+            }
+            if(/!/.test(inparens)) {
+              shuffle = true;
+            }
+            choices += '    <choice correct="' + correct + '"' + fixed + '>' + value + '</choice>\n';
           }
-
-          groupString += '  </choicegroup>\n';
-          groupString += '</multiplechoiceresponse>\n\n';
-
-          return groupString;
+        }
+        var result = '<multiplechoiceresponse>\n';
+        if(shuffle) {
+          result += '  <choicegroup type="MultipleChoice" shuffle="true">\n';
+        } else {
+          result += '  <choicegroup type="MultipleChoice">\n';
+        }
+        result += choices;
+        result += '  </choicegroup>\n';
+        result += '</multiplechoiceresponse>\n\n';
+        return result;
       });
 
       // group check answers
diff --git a/common/lib/xmodule/xmodule/tests/test_capa_module.py b/common/lib/xmodule/xmodule/tests/test_capa_module.py
index b741ad2125b..074439d13aa 100644
--- a/common/lib/xmodule/xmodule/tests/test_capa_module.py
+++ b/common/lib/xmodule/xmodule/tests/test_capa_module.py
@@ -82,6 +82,7 @@ class CapaFactory(object):
                attempts=None,
                problem_state=None,
                correct=False,
+               xml=None,
                **kwargs
                ):
         """
@@ -102,7 +103,9 @@ class CapaFactory(object):
         """
         location = Location(["i4x", "edX", "capa_test", "problem",
                              "SampleProblem{0}".format(cls.next_num())])
-        field_data = {'data': cls.sample_problem_xml}
+        if xml is None:
+            xml = cls.sample_problem_xml
+        field_data = {'data': xml}
         field_data.update(kwargs)
         descriptor = Mock(weight="1")
         if problem_state is not None:
@@ -1424,6 +1427,105 @@ class CapaModuleTest(unittest.TestCase):
         module = CapaFactory.create()
         self.assertEquals(module.get_problem("data"), {'html': module.get_problem_html(encapsulate=False)})
 
+    # Standard question with shuffle="true" used by a few tests
+    common_shuffle_xml = textwrap.dedent("""
+        <problem>
+        <multiplechoiceresponse>
+          <choicegroup type="MultipleChoice" shuffle="true">
+            <choice correct="false">Apple</choice>
+            <choice correct="false">Banana</choice>
+            <choice correct="false">Chocolate</choice>
+            <choice correct="true">Donut</choice>
+          </choicegroup>
+        </multiplechoiceresponse>
+        </problem>
+    """)
+
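+    # With masking (used by shuffle and answer-pool), the browser sees opaque
+    # names like 'mask_0' instead of the real 'choice_N' ids; the tests below
+    # check that events sent to track_function report the unmasked names.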
+    def test_check_unmask(self):
+        """
+        Check that shuffle unmasking is plumbed through: when check_problem is called,
+        unmasked names should appear in the track_function event_info.
+        """
+        module = CapaFactory.create(xml=self.common_shuffle_xml)
+        with patch.object(module.runtime, 'track_function') as mock_track_function:
+            get_request_dict = {CapaFactory.input_key(): 'mask_1'}  # the correct choice
+            module.check_problem(get_request_dict)
+            mock_call = mock_track_function.mock_calls[0]
+            event_info = mock_call[1][1]
+            # 'answers' key modified to use unmasked name
+            self.assertEqual(event_info['answers'][CapaFactory.answer_key()], 'choice_3')
+            # 'permutation' key added to record how problem was shown
+            self.assertEquals(event_info['permutation'][CapaFactory.answer_key()],
+                              ('shuffle', ['choice_3', 'choice_1', 'choice_2', 'choice_0']))
+            self.assertEquals(event_info['success'], 'correct')
+
+    def test_save_unmask(self):
+        """On problem save, unmasked data should appear on track_function."""
+        module = CapaFactory.create(xml=self.common_shuffle_xml)
+        with patch.object(module.runtime, 'track_function') as mock_track_function:
+            get_request_dict = {CapaFactory.input_key(): 'mask_0'}
+            module.save_problem(get_request_dict)
+            mock_call = mock_track_function.mock_calls[0]
+            event_info = mock_call[1][1]
+            self.assertEquals(event_info['answers'][CapaFactory.answer_key()], 'choice_2')
+            self.assertIsNotNone(event_info['permutation'][CapaFactory.answer_key()])
+
+    def test_reset_unmask(self):
+        """On problem reset, unmask names should appear track_function."""
+        module = CapaFactory.create(xml=self.common_shuffle_xml)
+        get_request_dict = {CapaFactory.input_key(): 'mask_0'}
+        module.check_problem(get_request_dict)
+        # On reset, 'old_state' should use unmasked names
+        with patch.object(module.runtime, 'track_function') as mock_track_function:
+            module.reset_problem(None)
+            mock_call = mock_track_function.mock_calls[0]
+            event_info = mock_call[1][1]
+            self.assertEquals(mock_call[1][0], 'reset_problem')
+            self.assertEquals(event_info['old_state']['student_answers'][CapaFactory.answer_key()], 'choice_2')
+            self.assertIsNotNone(event_info['permutation'][CapaFactory.answer_key()])
+
+    def test_rescore_unmask(self):
+        """On problem rescore, unmasked names should appear on track_function."""
+        module = CapaFactory.create(xml=self.common_shuffle_xml)
+        get_request_dict = {CapaFactory.input_key(): 'mask_0'}
+        module.check_problem(get_request_dict)
+        # On rescore, state/student_answers should use unmasked names
+        with patch.object(module.runtime, 'track_function') as mock_track_function:
+            module.rescore_problem()
+            mock_call = mock_track_function.mock_calls[0]
+            event_info = mock_call[1][1]
+            self.assertEquals(mock_call[1][0], 'problem_rescore')
+            self.assertEquals(event_info['state']['student_answers'][CapaFactory.answer_key()], 'choice_2')
+            self.assertIsNotNone(event_info['permutation'][CapaFactory.answer_key()])
+
+    def test_check_unmask_answerpool(self):
+        """Check answer-pool question track_function uses unmasked names"""
+        xml = textwrap.dedent("""
+            <problem>
+            <multiplechoiceresponse>
+              <choicegroup type="MultipleChoice" answer-pool="4">
+                <choice correct="false">Apple</choice>
+                <choice correct="false">Banana</choice>
+                <choice correct="false">Chocolate</choice>
+                <choice correct="true">Donut</choice>
+              </choicegroup>
+            </multiplechoiceresponse>
+            </problem>
+        """)
+        module = CapaFactory.create(xml=xml)
+        with patch.object(module.runtime, 'track_function') as mock_track_function:
+            get_request_dict = {CapaFactory.input_key(): 'mask_0'}
+            module.check_problem(get_request_dict)
+            mock_call = mock_track_function.mock_calls[0]
+            event_info = mock_call[1][1]
+            # 'answers' key modified to use unmasked name
+            self.assertEqual(event_info['answers'][CapaFactory.answer_key()], 'choice_2')
+            # 'permutation' key added to record how problem was shown
+            self.assertEquals(event_info['permutation'][CapaFactory.answer_key()],
+                              ('answerpool', ['choice_1', 'choice_3', 'choice_2', 'choice_0']))
+            self.assertEquals(event_info['success'], 'incorrect')
+
 
 class ComplexEncoderTest(unittest.TestCase):
     def test_default(self):
diff --git a/common/lib/xmodule/xmodule/tests/test_delay_between_attempts.py b/common/lib/xmodule/xmodule/tests/test_delay_between_attempts.py
new file mode 100644
index 00000000000..aa478c0a174
--- /dev/null
+++ b/common/lib/xmodule/xmodule/tests/test_delay_between_attempts.py
@@ -0,0 +1,306 @@
+"""
+Tests the logic of problems with a delay between attempt submissions.
+
+Note that this test file is based on test_capa_module.py and, as such,
+uses a similar CapaFactory-style problem setup to exercise the check_problem
+method of a capa module when the "delay between quiz submissions" setting
+is set to different values.
+"""
+
+import unittest
+import textwrap
+import datetime
+
+from mock import Mock
+
+import xmodule
+from xmodule.capa_module import CapaModule
+from xmodule.modulestore import Location
+from xblock.field_data import DictFieldData
+from xblock.fields import ScopeIds
+
+from . import get_test_system
+from pytz import UTC
+
+
+class CapaFactoryWithDelay(object):
+    """
+    Factory that creates CapaModule instances, specialized for the
+    delay_between_attempts test cases. It differs enough from the factory in
+    test_capa_module that unifying the two is unattractive, and the optional
+    arguments these tests do not use have been removed.
+    """
+
+    sample_problem_xml = textwrap.dedent("""\
+        <?xml version="1.0"?>
+        <problem>
+            <text>
+                <p>What is pi, to two decimal places?</p>
+            </text>
+        <numericalresponse answer="3.14">
+        <textline math="1" size="30"/>
+        </numericalresponse>
+        </problem>
+    """)
+
+    num = 0
+
+    @classmethod
+    def next_num(cls):
+        """
+        Return the next problem number for this factory.
+        """
+        cls.num += 1
+        return cls.num
+
+    @classmethod
+    def input_key(cls, input_num=2):
+        """
+        Return the input key to use when passing GET parameters
+        """
+        return ("input_" + cls.answer_key(input_num))
+
+    @classmethod
+    def answer_key(cls, input_num=2):
+        """
+        Return the key stored in the capa problem answer dict
+        """
+        return (
+            "%s_%d_1" % (
+                "-".join(['i4x', 'edX', 'capa_test', 'problem', 'SampleProblem%d' % cls.num]),
+                input_num,
+            )
+        )
+
+    @classmethod
+    def create(
+        cls,
+        max_attempts=None,
+        attempts=None,
+        correct=False,
+        last_submission_time=None,
+        submission_wait_seconds=None
+    ):
+        """
+        Optional parameters here are cut down to what we actually use vs. the regular CapaFactory.
+        """
+        location = Location(["i4x", "edX", "capa_test", "problem",
+                             "SampleProblem{0}".format(cls.next_num())])
+        field_data = {'data': cls.sample_problem_xml}
+
+        if max_attempts is not None:
+            field_data['max_attempts'] = max_attempts
+        if last_submission_time is not None:
+            field_data['last_submission_time'] = last_submission_time
+        if submission_wait_seconds is not None:
+            field_data['submission_wait_seconds'] = submission_wait_seconds
+
+        descriptor = Mock(weight="1")
+        if attempts is not None:
+            # Convert to int here because the tests sometimes pass attempts
+            # as the strings "0" and "1", since everything else is a string.
+            field_data['attempts'] = int(attempts)
+
+        system = get_test_system()
+        system.render_template = Mock(return_value="<div>Test Template HTML</div>")
+        module = CapaModule(
+            descriptor,
+            system,
+            DictFieldData(field_data),
+            ScopeIds(None, None, location, location),
+        )
+
+        if correct:
+            # Could set the internal state formally, but here we just jam in the score.
+            module.get_score = lambda: {'score': 1, 'total': 1}
+        else:
+            module.get_score = lambda: {'score': 0, 'total': 1}
+
+        return module
+
+
+class XModuleQuizAttemptsDelayTest(unittest.TestCase):
+    """
+    Class to test delay between quiz attempts.
+    """
+
+    def create_and_check(self,
+                         num_attempts=None,
+                         last_submission_time=None,
+                         submission_wait_seconds=None,
+                         considered_now=None,
+                         skip_check_problem=False):
+        """Unified create and check code for the tests here."""
+        module = CapaFactoryWithDelay.create(
+            attempts=num_attempts,
+            max_attempts=99,
+            last_submission_time=last_submission_time,
+            submission_wait_seconds=submission_wait_seconds
+        )
+        module.done = False
+        get_request_dict = {CapaFactoryWithDelay.input_key(): "3.14"}
+        if skip_check_problem:
+            return (module, None)
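+        # Passing considered_now supplies a fixed "now" to check_problem so the
+        # remaining-wait-time arithmetic in these tests is deterministic.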
+        if considered_now is not None:
+            result = module.check_problem(get_request_dict, considered_now)
+        else:
+            result = module.check_problem(get_request_dict)
+        return (module, result)
+
+    def test_first_submission(self):
+        # Not attempted yet
+        num_attempts = 0
+        (module, result) = self.create_and_check(
+            num_attempts=num_attempts,
+            last_submission_time=None
+        )
+        # Successfully submitted and answered
+        # Also, the number of attempts should increment by 1
+        self.assertEqual(result['success'], 'correct')
+        self.assertEqual(module.attempts, num_attempts + 1)
+
+    def test_no_wait_time(self):
+        num_attempts = 1
+        (module, result) = self.create_and_check(
+            num_attempts=num_attempts,
+            last_submission_time=datetime.datetime.now(UTC),
+            submission_wait_seconds=0
+        )
+        # Successfully submitted and answered
+        # Also, the number of attempts should increment by 1
+        self.assertEqual(result['success'], 'correct')
+        self.assertEqual(module.attempts, num_attempts + 1)
+
+    def test_submit_quiz_in_rapid_succession(self):
+        # Already attempted once (just now) and thus has a submitted time
+        num_attempts = 1
+        (module, result) = self.create_and_check(
+            num_attempts=num_attempts,
+            last_submission_time=datetime.datetime.now(UTC),
+            submission_wait_seconds=123
+        )
+        # You should get a dialog that tells you to wait
+        # Also, the number of attempts should not be incremented
+        self.assertRegexpMatches(result['success'], r"You must wait at least.*")
+        self.assertEqual(module.attempts, num_attempts)
+
+    def test_submit_quiz_too_soon(self):
+        # Already attempted once (just now)
+        num_attempts = 1
+        (module, result) = self.create_and_check(
+            num_attempts=num_attempts,
+            last_submission_time=datetime.datetime(2013, 12, 6, 0, 17, 36),
+            submission_wait_seconds=180,
+            considered_now=datetime.datetime(2013, 12, 6, 0, 18, 36)
+        )
+        # You should get a dialog that tells you to wait 2 minutes
+        # Also, the number of attempts should not be incremented
+        self.assertRegexpMatches(result['success'], r"You must wait at least 3 minutes between submissions. 2 minutes remaining\..*")
+        self.assertEqual(module.attempts, num_attempts)
+
+    def test_submit_quiz_1_second_too_soon(self):
+        # Already attempted once (just now)
+        num_attempts = 1
+        (module, result) = self.create_and_check(
+            num_attempts=num_attempts,
+            last_submission_time=datetime.datetime(2013, 12, 6, 0, 17, 36),
+            submission_wait_seconds=180,
+            considered_now=datetime.datetime(2013, 12, 6, 0, 20, 35)
+        )
+        # You should get a dialog that tells you to wait, with 1 second remaining
+        # Also, the number of attempts should not be incremented
+        self.assertRegexpMatches(result['success'], r"You must wait at least 3 minutes between submissions. 1 second remaining\..*")
+        self.assertEqual(module.attempts, num_attempts)
+
+    def test_submit_quiz_as_soon_as_allowed(self):
+        # Already attempted once (just now)
+        num_attempts = 1
+        (module, result) = self.create_and_check(
+            num_attempts=num_attempts,
+            last_submission_time=datetime.datetime(2013, 12, 6, 0, 17, 36),
+            submission_wait_seconds=180,
+            considered_now=datetime.datetime(2013, 12, 6, 0, 20, 36)
+        )
+        # Successfully submitted and answered
+        # Also, the number of attempts should increment by 1
+        self.assertEqual(result['success'], 'correct')
+        self.assertEqual(module.attempts, num_attempts + 1)
+
+    def test_submit_quiz_after_delay_expired(self):
+        # Already attempted once (just now)
+        num_attempts = 1
+        (module, result) = self.create_and_check(
+            num_attempts=num_attempts,
+            last_submission_time=datetime.datetime(2013, 12, 6, 0, 17, 36),
+            submission_wait_seconds=180,
+            considered_now=datetime.datetime(2013, 12, 6, 0, 24, 0)
+        )
+        # Successfully submitted and answered
+        # Also, the number of attempts should increment by 1
+        self.assertEqual(result['success'], 'correct')
+        self.assertEqual(module.attempts, num_attempts + 1)
+
+    def test_still_cannot_submit_after_max_attempts(self):
+        # Already attempted once (just now) and thus has a submitted time
+        num_attempts = 99
+        # Regular create_and_check should fail
+        with self.assertRaises(xmodule.exceptions.NotFoundError):
+            (module, unused_result) = self.create_and_check(
+                num_attempts=num_attempts,
+                last_submission_time=datetime.datetime(2013, 12, 6, 0, 17, 36),
+                submission_wait_seconds=180,
+                considered_now=datetime.datetime(2013, 12, 6, 0, 24, 0)
+            )
+
+        # Now try it without the check_problem
+        (module, unused_result) = self.create_and_check(
+            num_attempts=num_attempts,
+            last_submission_time=datetime.datetime(2013, 12, 6, 0, 17, 36),
+            submission_wait_seconds=180,
+            considered_now=datetime.datetime(2013, 12, 6, 0, 24, 0),
+            skip_check_problem=True
+        )
+        # Expect that number of attempts NOT incremented
+        self.assertEqual(module.attempts, num_attempts)
+
+    def test_submit_quiz_with_long_delay(self):
+        # Already attempted once (just now)
+        num_attempts = 1
+        (module, result) = self.create_and_check(
+            num_attempts=num_attempts,
+            last_submission_time=datetime.datetime(2013, 12, 6, 0, 17, 36),
+            submission_wait_seconds=60 * 60 * 2,
+            considered_now=datetime.datetime(2013, 12, 6, 2, 15, 35)
+        )
+        # You should get a dialog that tells you to wait, with 2 minutes 1 second remaining
+        # Also, the number of attempts should not be incremented
+        self.assertRegexpMatches(result['success'], r"You must wait at least 2 hours between submissions. 2 minutes 1 second remaining\..*")
+        self.assertEqual(module.attempts, num_attempts)
+
+    def test_submit_quiz_with_involved_pretty_print(self):
+        # Already attempted once (just now)
+        num_attempts = 1
+        (module, result) = self.create_and_check(
+            num_attempts=num_attempts,
+            last_submission_time=datetime.datetime(2013, 12, 6, 0, 17, 36),
+            submission_wait_seconds=60 * 60 * 2 + 63,
+            considered_now=datetime.datetime(2013, 12, 6, 1, 15, 40)
+        )
+        # You should get a dialog that tells you to wait, with 1 hour 2 minutes 59 seconds remaining
+        # Also, the number of attempts should not be incremented
+        self.assertRegexpMatches(result['success'], r"You must wait at least 2 hours 1 minute 3 seconds between submissions. 1 hour 2 minutes 59 seconds remaining\..*")
+        self.assertEqual(module.attempts, num_attempts)
+
+    def test_submit_quiz_with_nonplural_pretty_print(self):
+        # Already attempted once (just now)
+        num_attempts = 1
+        (module, result) = self.create_and_check(
+            num_attempts=num_attempts,
+            last_submission_time=datetime.datetime(2013, 12, 6, 0, 17, 36),
+            submission_wait_seconds=60,
+            considered_now=datetime.datetime(2013, 12, 6, 0, 17, 36)
+        )
+        # You should get a dialog that tells you to wait, with 1 minute remaining
+        # Also, the number of attempts should not be incremented
+        self.assertRegexpMatches(result['success'], r"You must wait at least 1 minute between submissions. 1 minute remaining\..*")
+        self.assertEqual(module.attempts, num_attempts)
-- 
GitLab