Commit 4de51c8f authored by e0d's avatar e0d

Merge pull request #1960 from edx/e0d/release-merge

E0d/release merge
parents 319091a2 d52b959f
Showing 997 additions and 20 deletions
......@@ -136,7 +136,6 @@ class LocMapperStore(object):
if cached_value:
return cached_value
maps = self.location_map.find(location_id)
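# materialize the result so the len() check below works; find() likely returns a cursor, which has no len()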
maps = list(maps)
if len(maps) == 0:
......
......@@ -258,8 +258,23 @@ class CombinedOpenEndedV1Module():
if not task_states:
return (0, 0, state_values[OpenEndedChild.INITIAL], idx)
final_child_state = json.loads(task_states[-1])
scores = [attempt.get('score', 0) for attempt in final_child_state.get('child_history', [])]
final_task_xml = self.task_xml[-1]
final_child_state_json = task_states[-1]
final_child_state = json.loads(final_child_state_json)
tag_name = self.get_tag_name(final_task_xml)
children = self.child_modules()
task_descriptor = children['descriptors'][tag_name](self.system)
task_parsed_xml = task_descriptor.definition_from_xml(etree.fromstring(final_task_xml), self.system)
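# instantiate the final task module from its parsed XML and saved state so its scoring logic can be reused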
task = children['modules'][tag_name](
self.system,
self.location,
task_parsed_xml,
task_descriptor,
self.static_data,
instance_state=final_child_state_json,
)
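# recompute all attempt scores through the reconstructed task; for ML grading this derives them
# from rubric scores rather than the raw stored 'score' values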
scores = task.all_scores()
if scores:
best_score = max(scores)
else:
......
......@@ -679,7 +679,7 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
return {
'success': success,
'error': error_message,
'student_response': data['student_answer'].replace("\n","<br/>")
'student_response': data['student_answer'].replace("\n", "<br/>")
}
def update_score(self, data, system):
......@@ -738,6 +738,44 @@ class OpenEndedModule(openendedchild.OpenEndedChild):
html = system.render_template('{0}/open_ended.html'.format(self.TEMPLATE_DIR), context)
return html
def latest_score(self):
"""None if not available"""
if not self.child_history:
return None
return self.score_for_attempt(-1)
def all_scores(self):
"""None if not available"""
if not self.child_history:
return None
return [self.score_for_attempt(index) for index in xrange(0, len(self.child_history))]
def score_for_attempt(self, index):
"""
Return the sum of rubric scores for ML grading; otherwise return attempt["score"].
"""
attempt = self.child_history[index]
score = attempt.get('score')
post_assessment_data = self._parse_score_msg(attempt.get('post_assessment'), self.system)
grader_types = post_assessment_data.get('grader_types')
# According to _parse_score_msg, in ML grading there should be only one grader type.
if len(grader_types) == 1 and grader_types[0] == 'ML':
rubric_scores = post_assessment_data.get("rubric_scores")
# Similarly, there should be only one list of rubric scores.
if len(rubric_scores) == 1:
rubric_scores_sum = sum(rubric_scores[0])
log.debug("""Score normalized for location={loc}, old_score={old_score},
new_score={new_score}, rubric_score={rubric_score}""".format(
loc=self.location_string,
old_score=score,
new_score=rubric_scores_sum,
rubric_score=rubric_scores
))
return rubric_scores_sum
return score
class OpenEndedDescriptor():
"""
......
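A minimal, self-contained sketch of the normalization rule above (data values hypothetical; field names follow _parse_score_msg): for a single ML grader, the recorded score is replaced by the sum of that grader's rubric category scores.

post_assessment_data = {'grader_types': ['ML'], 'rubric_scores': [[1, 2]]}
grader_types = post_assessment_data.get('grader_types')
if len(grader_types) == 1 and grader_types[0] == 'ML':
    rubric_scores = post_assessment_data.get('rubric_scores')
    if len(rubric_scores) == 1:
        # 1 + 2 = 3 overrides whatever attempt['score'] held
        score = sum(rubric_scores[0])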
......@@ -27,7 +27,9 @@ from xmodule.progress import Progress
from xmodule.tests.test_util_open_ended import (
DummyModulestore, TEST_STATE_SA_IN,
MOCK_INSTANCE_STATE, TEST_STATE_SA, TEST_STATE_AI, TEST_STATE_AI2, TEST_STATE_AI2_INVALID,
TEST_STATE_SINGLE, TEST_STATE_PE_SINGLE, MockUploadedFile
TEST_STATE_SINGLE, TEST_STATE_PE_SINGLE, MockUploadedFile, INSTANCE_INCONSISTENT_STATE,
INSTANCE_INCONSISTENT_STATE2, INSTANCE_INCONSISTENT_STATE3, INSTANCE_INCONSISTENT_STATE4,
INSTANCE_INCONSISTENT_STATE5
)
from xblock.field_data import DictFieldData
......@@ -358,7 +360,7 @@ class OpenEndedModuleTest(unittest.TestCase):
# Create a module with no state yet. Important that this starts off as a blank slate.
test_module = OpenEndedModule(self.test_system, self.location,
self.definition, self.descriptor, self.static_data, self.metadata)
self.definition, self.descriptor, self.static_data, self.metadata)
saved_response = "Saved response."
submitted_response = "Submitted response."
......@@ -369,7 +371,7 @@ class OpenEndedModuleTest(unittest.TestCase):
self.assertEqual(test_module.get_display_answer(), "")
# Now, store an answer in the module.
test_module.handle_ajax("store_answer", {'student_answer' : saved_response}, get_test_system())
test_module.handle_ajax("store_answer", {'student_answer': saved_response}, get_test_system())
# The stored answer should now equal our response.
self.assertEqual(test_module.stored_answer, saved_response)
self.assertEqual(test_module.get_display_answer(), saved_response)
......@@ -387,6 +389,7 @@ class OpenEndedModuleTest(unittest.TestCase):
# Confirm that the answer is stored properly.
self.assertEqual(test_module.latest_answer(), submitted_response)
class CombinedOpenEndedModuleTest(unittest.TestCase):
"""
Unit tests for the combined open ended xmodule
......@@ -610,7 +613,6 @@ class CombinedOpenEndedModuleTest(unittest.TestCase):
metadata=self.metadata,
instance_state={'task_states': TEST_STATE_SA_IN})
def test_get_score_realistic(self):
"""
Try to parse the correct score from a json instance state
......@@ -717,6 +719,175 @@ class CombinedOpenEndedModuleTest(unittest.TestCase):
self.ai_state_success(TEST_STATE_PE_SINGLE, iscore=0, tasks=[self.task_xml2])
class CombinedOpenEndedModuleConsistencyTest(unittest.TestCase):
"""
Unit tests for the combined open ended xmodule's rubric score consistency.
"""
# location, definition_template, prompt, rubric, max_score, metadata, oeparam, task_xml1, task_xml2
# All these variables are used to construct the xmodule descriptor.
location = Location(["i4x", "edX", "open_ended", "combinedopenended",
"SampleQuestion"])
definition_template = """
<combinedopenended attempts="10000">
{rubric}
{prompt}
<task>
{task1}
</task>
<task>
{task2}
</task>
</combinedopenended>
"""
prompt = "<prompt>This is a question prompt</prompt>"
rubric = '''<rubric><rubric>
<category>
<description>Response Quality</description>
<option>The response is not a satisfactory answer to the question. It either fails to address the question or does so in a limited way, with no evidence of higher-order thinking.</option>
<option>Second option</option>
</category>
</rubric></rubric>'''
max_score = 10
metadata = {'attempts': '10', 'max_score': max_score}
oeparam = etree.XML('''
<openendedparam>
<initial_display>Enter essay here.</initial_display>
<answer_display>This is the answer.</answer_display>
<grader_payload>{"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}</grader_payload>
</openendedparam>
''')
task_xml1 = '''
<selfassessment>
<hintprompt>
What hint about this problem would you give to someone?
</hintprompt>
<submitmessage>
Save Successful. Thanks for participating!
</submitmessage>
</selfassessment>
'''
task_xml2 = '''
<openended min_score_to_attempt="1" max_score_to_attempt="10">
<openendedparam>
<initial_display>Enter essay here.</initial_display>
<answer_display>This is the answer.</answer_display>
<grader_payload>{"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}</grader_payload>
</openendedparam>
</openended>'''
static_data = {
'max_attempts': 20,
'prompt': prompt,
'rubric': rubric,
'max_score': max_score,
'display_name': 'Name',
'accept_file_upload': False,
'close_date': "",
's3_interface': test_util_open_ended.S3_INTERFACE,
'open_ended_grading_interface': test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE,
'skip_basic_checks': False,
'graded': True,
}
definition = {'prompt': etree.XML(prompt), 'rubric': etree.XML(rubric), 'task_xml': [task_xml1, task_xml2]}
full_definition = definition_template.format(prompt=prompt, rubric=rubric, task1=task_xml1, task2=task_xml2)
descriptor = Mock(data=full_definition)
test_system = get_test_system()
test_system.open_ended_grading_interface = None
combinedoe_container = CombinedOpenEndedModule(
descriptor=descriptor,
runtime=test_system,
field_data=DictFieldData({
'data': full_definition,
'weight': '1',
}),
scope_ids=ScopeIds(None, None, None, None),
)
def setUp(self):
self.combinedoe = CombinedOpenEndedV1Module(self.test_system,
self.location,
self.definition,
self.descriptor,
static_data=self.static_data,
metadata=self.metadata,
instance_state=json.loads(INSTANCE_INCONSISTENT_STATE))
def test_get_score(self):
"""
If the grader type is ML, the score should be updated from the rubric scores. Aggregate rubric score = sum([3]) * 5.
"""
score_dict = self.combinedoe.get_score()
self.assertEqual(score_dict['score'], 15.0)
self.assertEqual(score_dict['total'], 5.0)
def test_get_score_with_pe_grader(self):
"""
If the grader type is PE, the score should not be updated from the rubric scores. Aggregate rubric score = sum([3]) * 5.
"""
combinedoe = CombinedOpenEndedV1Module(self.test_system,
self.location,
self.definition,
self.descriptor,
static_data=self.static_data,
metadata=self.metadata,
instance_state=json.loads(INSTANCE_INCONSISTENT_STATE2))
score_dict = combinedoe.get_score()
self.assertNotEqual(score_dict['score'], 15.0)
def test_get_score_with_different_score_value_in_rubric(self):
"""
If the grader type is ML, the score should be updated from the rubric scores. Aggregate rubric score = sum([5]) * 5.
"""
combinedoe = CombinedOpenEndedV1Module(self.test_system,
self.location,
self.definition,
self.descriptor,
static_data=self.static_data,
metadata=self.metadata,
instance_state=json.loads(INSTANCE_INCONSISTENT_STATE3))
score_dict = combinedoe.get_score()
self.assertEqual(score_dict['score'], 25.0)
self.assertEqual(score_dict['total'], 5.0)
def test_get_score_with_old_task_states(self):
"""
If the grader type is ML and old_task_states are present in the inconsistent instance state, the score should be
updated from the rubric scores. Aggregate rubric score = sum([3]) * 5.
"""
combinedoe = CombinedOpenEndedV1Module(self.test_system,
self.location,
self.definition,
self.descriptor,
static_data=self.static_data,
metadata=self.metadata,
instance_state=json.loads(INSTANCE_INCONSISTENT_STATE4))
score_dict = combinedoe.get_score()
self.assertEqual(score_dict['score'], 15.0)
self.assertEqual(score_dict['total'], 5.0)
def test_get_score_with_score_missing(self):
"""
If the grader type is ML and the score field is missing from the inconsistent instance state, the score should be
updated from the rubric scores. Aggregate rubric score = sum([3]) * 5.
"""
combinedoe = CombinedOpenEndedV1Module(self.test_system,
self.location,
self.definition,
self.descriptor,
static_data=self.static_data,
metadata=self.metadata,
instance_state=json.loads(INSTANCE_INCONSISTENT_STATE5))
score_dict = combinedoe.get_score()
self.assertEqual(score_dict['score'], 15.0)
self.assertEqual(score_dict['total'], 5.0)
class OpenEndedModuleXmlTest(unittest.TestCase, DummyModulestore):
"""
Test the student flow in the combined open ended xmodule
......@@ -948,6 +1119,7 @@ class OpenEndedModuleXmlAttemptTest(unittest.TestCase, DummyModulestore):
reset_data = json.loads(self._handle_ajax("reset", {}))
self.assertEqual(reset_data['success'], False)
class OpenEndedModuleXmlImageUploadTest(unittest.TestCase, DummyModulestore):
"""
Test whether a student is able to upload images properly.
......@@ -1018,7 +1190,7 @@ class OpenEndedModuleXmlImageUploadTest(unittest.TestCase, DummyModulestore):
# Simulate a student saving an answer with a link.
response = module.handle_ajax("save_answer", {
"student_answer": "{0} {1}".format(self.answer_text, self.answer_link)
})
})
response = json.loads(response)
......
(The diff for one file is collapsed and not shown.)
......@@ -8,6 +8,9 @@ from abc import ABCMeta, abstractmethod
from django.contrib.auth.models import User, Group
from xmodule.modulestore import Location
from xmodule.modulestore.exceptions import InvalidLocationError, ItemNotFoundError
from xmodule.modulestore.django import loc_mapper
from xmodule.modulestore.locator import CourseLocator
class CourseContextRequired(Exception):
......@@ -134,20 +137,45 @@ class CourseRole(GroupBasedRole):
A named role in a particular course
"""
def __init__(self, role, location, course_context=None):
# pylint: disable=no-member
loc = Location(location)
legacy_group_name = '{0}_{1}'.format(role, loc.course)
if loc.category.lower() == 'course':
course_id = loc.course_id
else:
"""
Location may be a Location; a string, dict, or tuple that the Location constructor accepts;
or a CourseLocator. Handle all of these, giving preference to the preferred naming.
"""
# TODO: figure out how to make the group name generation lazy so it doesn't force the
# loc mapping?
if not hasattr(location, 'course_id'):
location = Location(location)
# direct copy from auth.authz.get_all_course_role_groupnames; will refactor to one implementation ASAP
groupnames = []
try:
groupnames.append('{0}_{1}'.format(role, location.course_id))
except InvalidLocationError: # will occur on old locations where location is not of category course
if course_context is None:
raise CourseContextRequired()
course_id = course_context
else:
groupnames.append('{0}_{1}'.format(role, course_context))
group_name = '{0}_{1}'.format(role, course_id)
super(CourseRole, self).__init__([group_name, legacy_group_name])
# pylint: disable=no-member
if isinstance(location, Location):
try:
locator = loc_mapper().translate_location(location.course_id, location, False, False)
groupnames.append('{0}_{1}'.format(role, locator.course_id))
except (InvalidLocationError, ItemNotFoundError):
# if it's never been mapped, the auth won't be via the Locator syntax
pass
# least preferred legacy role_course format
groupnames.append('{0}_{1}'.format(role, location.course))
elif isinstance(location, CourseLocator):
# map the locator back to the old Location syntax
old_location = loc_mapper().translate_locator_to_location(location, get_course=True)
if old_location:
# the slashified version of the course_id (myu/mycourse/myrun)
groupnames.append('{0}_{1}'.format(role, old_location.course_id))
# add the least desirable but sometimes occurring format.
groupnames.append('{0}_{1}'.format(role, old_location.course))
super(CourseRole, self).__init__(groupnames)
class OrgRole(GroupBasedRole):
......
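A hedged illustration of the group names the constructor above can assemble, ordered from most to least preferred (the role and course ids are hypothetical, and the locator-based id format depends on the loc mapper):

role = 'staff'
groupnames = [
    'staff_MITx/6.002x/2013_Spring',  # preferred: role + slashified course_id
    'staff_MITx.6.002x.2013_Spring',  # locator-based course_id, if a mapping exists (format assumed)
    'staff_6.002x',                   # least preferred legacy format: role + bare course name
]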
......@@ -23,6 +23,17 @@
<h1>${_("Staff grading")}</h1>
<div class="breadcrumbs"></div>
<div class="error-container"></div>
<br>
<div style="color:red;">
<b>
The issue we had between Dec 3 and Dec 5 with the visibility
of student responses has now been resolved. All responses
should now be visible. Thank you for your patience!
--the edX team
</b>
</div>
<br>
<hr>
<div class="message-container"></div>
......
......@@ -16,6 +16,17 @@
<section class="container">
<div class="combined-notifications" data-ajax_url="${ajax_url}">
<div class="error-container">${error_text}</div>
<br>
<div style="color:red;">
<b>
The issue we had between Dec 3 and Dec 5 with the visibility
of student responses has now been resolved. All responses
should now be visible. Thank you for your patience!
--the edX team
</b>
</div>
<br>
<hr>
<h1>${_("Open Ended Console")}</h1>
<h2>${_("Instructions")}</h2>
......
......@@ -19,6 +19,17 @@
<section class="container">
<div class="open-ended-problems" data-ajax_url="${ajax_url}">
<div class="error-container">${error_text}</div>
<br>
<div style="color:red;">
<b>
The issue we had between Dec 3 and Dec 5 with the visibility
of student responses has now been resolved. All responses
should now be visible. Thank you for your patience!
--the edX team
</b>
</div>
<br>
<hr>
<h1>${_("Flagged Open Ended Problems")}</h1>
<h2>${_("Instructions")}</h2>
......
......@@ -16,6 +16,17 @@
<section class="container">
<div class="open-ended-problems" data-ajax_url="${ajax_url}">
<div class="error-container">${error_text}</div>
<br>
<div style="color:red;">
<b>
The issue we had between Dec 3 and Dec 5 with the visibility
of student responses has now been resolved. All responses
should now be visible. Thank you for your patience!
--the edX team
</b>
</div>
<br>
<hr>
<h1>${_("Open Ended Problems")}</h1>
<h2>${_("Instructions")}</h2>
<p>${_("Here is a list of open ended problems for this course.")}</p>
......
......@@ -15,6 +15,17 @@ criteria.{end_li_tag}
<section class="container peer-grading-container">
<div class="peer-grading" data-ajax-url="${ajax_url}" data-use-single-location="${use_single_location}">
<div class="error-container">${error_text}</div>
<br>
<div style="color:red;">
<b>
The issue we had between Dec 3 and Dec 5 with the visibility
of student responses has now been resolved. All responses
should now be visible. Thank you for your patience!
--the edX team
</b>
</div>
<br>
<hr>
<div class="peer-grading-tools">
<h1 class="peer-grading-title">${_("Peer Grading")}</h1>
<h2 class="peer-grading-instructions">${_("Instructions")}</h2>
......