diff --git a/common/lib/capa/capa/capa_problem.py b/common/lib/capa/capa/capa_problem.py
index 8945db4ff6eabe408491330dbc4951d1d371b88b..3d68d2d6043838dc60df89de76ec0fe1fecb6a8f 100644
--- a/common/lib/capa/capa/capa_problem.py
+++ b/common/lib/capa/capa/capa_problem.py
@@ -154,21 +154,10 @@ class LoncapaProblem(object):
     def get_max_score(self):
         '''
         Return maximum score for this problem.
-        We do this by counting the number of answers available for each question
-        in the problem. If the Response for a question has a get_max_score() method
-        then we call that and add its return value to the count. That can be
-        used to give complex problems (eg programming questions) multiple points.
         '''
         maxscore = 0
         for response, responder in self.responders.iteritems():
-            if hasattr(responder, 'get_max_score'):
-                try:
-                    maxscore += responder.get_max_score()
-                except Exception:
-                    log.debug('responder %s failed to properly return from get_max_score()' % responder)  # FIXME
-                    raise
-            else:
-                maxscore += len(self.responder_answers[response])
+            maxscore += responder.get_max_score()
         return maxscore
 
     def get_score(self):
diff --git a/common/lib/capa/capa/responsetypes.py b/common/lib/capa/capa/responsetypes.py
index 91d8a8cadcbb1b1137624ad60cf167d7d44e1037..4c6627b3528c2891b0e9d13341c9f3c5a41032c4 100644
--- a/common/lib/capa/capa/responsetypes.py
+++ b/common/lib/capa/capa/responsetypes.py
@@ -75,7 +75,6 @@ class LoncapaResponse(object):
 
     In addition, these methods are optional:
 
-      - get_max_score : if defined, this is called to obtain the maximum score possible for this question
       - setup_response : find and note the answer input field IDs for the response; called by __init__
       - check_hint_condition : check to see if the student's answers satisfy a particular condition for a hint to be displayed
       - render_html : render this Response as HTML (must return XHTML compliant string)
@@ -134,6 +133,11 @@ class LoncapaResponse(object):
         if self.max_inputfields == 1:
             self.answer_id = self.answer_ids[0]  # for convenience
 
+        self.maxpoints = dict()
+        for inputfield in self.inputfields:
+            maxpoints = inputfield.get('points', '1')  # By default, each answerfield is worth 1 point
+            self.maxpoints.update({inputfield.get('id'): int(maxpoints)})
+
         self.default_answer_map = {}  # dict for default answer map (provided in input elements)
         for entry in self.inputfields:
             answer = entry.get('correct_answer')
@@ -143,6 +147,12 @@ class LoncapaResponse(object):
         if hasattr(self, 'setup_response'):
             self.setup_response()
 
+    def get_max_score(self):
+        '''
+        Return the total maximum points of all answer fields under this Response
+        '''
+        return sum(self.maxpoints.values())
+
     def render_html(self, renderer):
         '''
         Return XHTML Element tree representation of this Response.
@@ -1067,7 +1077,10 @@ class CodeResponse(LoncapaResponse):
                       (err, self.answer_id, convert_files_to_filenames(student_answers)))
             raise Exception(err)
 
-        self.context.update({'submission': unicode(submission)})
+        if is_file(submission):
+            self.context.update({'submission': submission.name})
+        else:
+            self.context.update({'submission': submission})
 
         # Prepare xqueue request
         #------------------------------------------------------------
@@ -1114,21 +1127,24 @@ class CodeResponse(LoncapaResponse):
 
     def update_score(self, score_msg, oldcmap, queuekey):
 
-        (valid_score_msg, correct, score, msg) = self._parse_score_msg(score_msg)
+        (valid_score_msg, correct, points, msg) = self._parse_score_msg(score_msg)
         if not valid_score_msg:
             oldcmap.set(self.answer_id, msg='Error: Invalid grader reply.')
             return oldcmap
 
-        correctness = 'incorrect'
-        if correct:
-            correctness = 'correct'
+        correctness = 'correct' if correct else 'incorrect'
 
         self.context['correct'] = correctness  # TODO: Find out how this is used elsewhere, if any
 
         # Replace 'oldcmap' with new grading results if queuekey matches.
         # If queuekey does not match, we keep waiting for the score_msg whose key actually matches
         if oldcmap.is_right_queuekey(self.answer_id, queuekey):
-            oldcmap.set(self.answer_id, correctness=correctness, msg=msg.replace('&nbsp;', '&#160;'), queuekey=None)  # Queuekey is consumed
+            # Sanity check on returned points
+            if points < 0:
+                points = 0
+            elif points > self.maxpoints[self.answer_id]:
+                points = self.maxpoints[self.answer_id]
+            oldcmap.set(self.answer_id, npoints=points, correctness=correctness, msg=msg.replace('&nbsp;', '&#160;'), queuekey=None)  # Queuekey is consumed
         else:
             log.debug('CodeResponse: queuekey %s does not match for answer_id=%s.' % (queuekey, self.answer_id))
 
diff --git a/common/lib/xmodule/xmodule/capa_module.py b/common/lib/xmodule/xmodule/capa_module.py
index f12b1c2be4f51a3ddfe27bfd0512a527b1051032..46e02542c83d46bb02cb3061faf754f997e2ed4f 100644
--- a/common/lib/xmodule/xmodule/capa_module.py
+++ b/common/lib/xmodule/xmodule/capa_module.py
@@ -464,7 +464,7 @@ class CapaModule(XModule):
                 return {'success': msg}
             log.exception("Error in capa_module problem checking")
             raise Exception("error in capa_module")
-        
+
         self.attempts = self.attempts + 1
         self.lcp.done = True
 
diff --git a/common/lib/xmodule/xmodule/tests/__init__.py b/common/lib/xmodule/xmodule/tests/__init__.py
index 72307951632e862626f0878361669da8af98e354..7b2bd6bc2b2d277d793291ffae3109e8679edfff 100644
--- a/common/lib/xmodule/xmodule/tests/__init__.py
+++ b/common/lib/xmodule/xmodule/tests/__init__.py
@@ -325,7 +325,8 @@ class CodeResponseTest(unittest.TestCase):
 
             new_cmap = CorrectMap()
             new_cmap.update(old_cmap)
-            new_cmap.set(answer_id=answer_ids[i], correctness=correctness, msg='MESSAGE', queuekey=None)
+            npoints = 1 if correctness=='correct' else 0
+            new_cmap.set(answer_id=answer_ids[i], npoints=npoints, correctness=correctness, msg='MESSAGE', queuekey=None)
 
             test_lcp.update_score(xserver_msgs[correctness], queuekey=1000 + i)
             self.assertEquals(test_lcp.correct_map.get_dict(), new_cmap.get_dict())
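
Illustration (not part of the diff): a minimal sketch of how the new per-inputfield `points` attribute is intended to flow into scoring under this change. The XML fragment, the `input_1` id, and the variable names are hypothetical stand-ins for the real capa machinery, not code from the repository.

from lxml import etree

# Hypothetical problem fragment: one code-grading input field worth 3 points.
PROBLEM_XML = """
<coderesponse queuename="test-queue">
    <textbox id="input_1" rows="10" cols="70" mode="python" points="3"/>
</coderesponse>
"""

inputfields = etree.fromstring(PROBLEM_XML.strip()).findall('textbox')

# Mirrors the new LoncapaResponse.__init__ logic: every input field defaults
# to 1 point unless it carries an explicit points="N" attribute.
maxpoints = dict((field.get('id'), int(field.get('points', '1'))) for field in inputfields)

# Mirrors the new LoncapaResponse.get_max_score(): sum over all input fields,
# which LoncapaProblem.get_max_score() now relies on unconditionally.
max_score = sum(maxpoints.values())   # -> 3 for the fragment above

# Mirrors the sanity check added to CodeResponse.update_score(): points returned
# by the external grader are clamped to [0, max points] before being stored as
# npoints in the CorrectMap.
graded_points = 5
graded_points = min(max(graded_points, 0), maxpoints['input_1'])   # -> 3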