edx-platform-release, commit bef0eb11
Authored 7 years ago by Awais Jibran; committed 6 years ago by rabiaiftikhar
updated correct map when new score is calculated
Parent: e0d20be1
No related merge requests found

Showing 2 changed files with 34 additions and 24 deletions:
- common/lib/xmodule/xmodule/capa_base.py (+11, -5)
- common/lib/xmodule/xmodule/tests/test_capa_module.py (+23, -19)
common/lib/xmodule/xmodule/capa_base.py (+11, -5)
@@ -1623,8 +1623,8 @@ class CapaMixin(ScorableXBlockMixin, CapaFields):
         event_info['orig_score'] = orig_score.raw_earned
         event_info['orig_total'] = orig_score.raw_possible
         try:
+            self.update_correctness()
             calculated_score = self.calculate_score()
         except (StudentInputError, ResponseError, LoncapaProblemError) as inst:
             log.warning("Input error in capa_module:problem_rescore", exc_info=True)
             event_info['failure'] = 'input_error'
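For context, this hunk sits in the rescore path: the correct map is refreshed before the score is recomputed, and input errors are logged and recorded on the event. Below is a minimal sketch of that call pattern; the rescore_step helper, the problem argument, and the logger setup are hypothetical stand-ins (only the exception types, the log message, and the event keys come from the hunk), and the capa.responsetypes import assumes an edx-platform environment where the capa package is importable.

import logging

# Assumes the edx-platform capa package is on the path.
from capa.responsetypes import LoncapaProblemError, ResponseError, StudentInputError

log = logging.getLogger(__name__)


def rescore_step(problem, event_info):
    """Hypothetical helper mirroring the try/except shown in the hunk above."""
    event_info['orig_score'] = problem.score.raw_earned
    event_info['orig_total'] = problem.score.raw_possible
    try:
        # New in this commit: sync the correct map before recomputing the score.
        problem.update_correctness()
        return problem.calculate_score()
    except (StudentInputError, ResponseError, LoncapaProblemError):
        log.warning("Input error in capa_module:problem_rescore", exc_info=True)
        event_info['failure'] = 'input_error'
        return None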
@@ -1673,14 +1673,20 @@ class CapaMixin(ScorableXBlockMixin, CapaFields):
         """
         return self.score

+    def update_correctness(self):
+        """
+        Updates correct map of the LCP.
+        Operates by creating a new correctness map based on the current
+        state of the LCP, and updating the old correctness map of the LCP.
+        """
+        new_correct_map = self.lcp.get_grade_from_current_answers(None)
+        self.lcp.correct_map.update(new_correct_map)
+
     def calculate_score(self):
         """
         Returns the score calculated from the current problem state.
         Operates by creating a new correctness map based on the current
         state of the LCP, and having the LCP generate a score from that.
         """
-        new_correctness = self.lcp.get_grade_from_current_answers(None)
-        new_score = self.lcp.calculate_score(new_correctness)
+        new_score = self.lcp.calculate_score()
         return Score(raw_earned=new_score['score'], raw_possible=new_score['total'])

     def score_from_lcp(self):
common/lib/xmodule/xmodule/tests/test_capa_module.py (+23, -19)
@@ -1095,31 +1095,35 @@ class CapaModuleTest(unittest.TestCase):
     def test_rescore_problem_additional_correct(self):
         # make sure it also works when new correct answer has been added
         module = CapaFactory.create(attempts=0)
         answer_id = CapaFactory.answer_key()

-        # Simulate that all answers are marked correct, no matter
-        # what the input is, by patching CorrectMap.is_correct()
-        with patch('capa.correctmap.CorrectMap.is_correct') as mock_is_correct:
-            mock_is_correct.return_value = True
-
-            # Check the problem
-            get_request_dict = {CapaFactory.input_key(): '1'}
-            result = module.submit_problem(get_request_dict)
+        # Check the problem
+        get_request_dict = {CapaFactory.input_key(): '1'}
+        result = module.submit_problem(get_request_dict)

-            # Expect that the problem is marked correct
-            self.assertEqual(result['success'], 'correct')
+        # Expect that the problem is marked incorrect and user didn't earn score
+        self.assertEqual(result['success'], 'incorrect')
+        self.assertEqual(module.get_score(), (0, 1))
+        self.assertEqual(module.correct_map[answer_id]['correctness'], 'incorrect')

         # Expect that the number of attempts is incremented
         self.assertEqual(module.attempts, 1)
-        self.assertEqual(module.get_score(), (1, 1))

         # Simulate that after adding a new correct answer the new calculated score is (0,1)
         # by patching CapaMixin.calculate_score()
         # In case of rescore with only_if_higher=True it should not update score of module
         # if previous score was higher
         with patch('xmodule.capa_base.CapaMixin.calculate_score') as mock_calculate_score:
             mock_calculate_score.return_value = Score(raw_earned=0, raw_possible=1)
             module.rescore(only_if_higher=True)
-            self.assertEqual(module.get_score(), (1, 1))

         # Simulate that after making an incorrect answer to the correct answer
         # the new calculated score is (1,1)
         # by patching CorrectMap.is_correct() and NumericalResponse.get_staff_ans()
         # In case of rescore with only_if_higher=True it should update score of module
         # if previous score was lower
         with patch('capa.correctmap.CorrectMap.is_correct') as mock_is_correct:
             mock_is_correct.return_value = True
             module.set_score(module.score_from_lcp())
             with patch('capa.responsetypes.NumericalResponse.get_staff_ans') as get_staff_ans:
                 get_staff_ans.return_value = 1 + 0j
                 module.rescore(only_if_higher=True)

         # Expect that the problem is marked correct and user earned the score
         self.assertEqual(module.get_score(), (1, 1))
         self.assertEqual(module.correct_map[answer_id]['correctness'], 'correct')

         # Expect that the number of attempts is not incremented
         self.assertEqual(module.attempts, 1)