Skip to content
Snippets Groups Projects
Unverified Commit a0468787 authored by Dave St.Germain's avatar Dave St.Germain Committed by GitHub
Browse files

Merge pull request #20677 from edx/dcs/track-cohort-sgp

Added track and cohort filter for CSV score export
parents ff729253 14cab5aa
No related branches found
No related tags found
No related merge requests found
...@@ -4,141 +4,17 @@ This module contains utility functions for grading. ...@@ -4,141 +4,17 @@ This module contains utility functions for grading.
from __future__ import absolute_import, unicode_literals from __future__ import absolute_import, unicode_literals
import logging import logging
import time
from datetime import timedelta from datetime import timedelta
from django.core.exceptions import ObjectDoesNotExist
from django.utils import timezone from django.utils import timezone
from django.utils.translation import ugettext as _
from opaque_keys.edx.keys import UsageKey
from courseware.models import StudentModule
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from student.models import CourseEnrollment
from super_csv import ChecksumMixin, CSVProcessor, DeferrableMixin
from .config.waffle import ENFORCE_FREEZE_GRADE_AFTER_COURSE_END, waffle_flags from .config.waffle import ENFORCE_FREEZE_GRADE_AFTER_COURSE_END, waffle_flags
log = logging.getLogger(__name__) log = logging.getLogger(__name__)
class ScoreCSVProcessor(ChecksumMixin, DeferrableMixin, CSVProcessor):
    """
    CSV Processor for file format defined for Staff Graded Points
    """
    # Columns emitted on export; 'csum' is the row checksum computed over
    # checksum_columns (handled by ChecksumMixin).
    columns = ['user_id', 'username', 'full_name', 'student_uid',
               'enrolled', 'track', 'block_id', 'title', 'date_last_graded',
               'who_last_graded', 'csum', 'last_points', 'points']
    required_columns = ['user_id', 'points', 'csum', 'block_id', 'last_points']
    checksum_columns = ['user_id', 'block_id', 'last_points']
    # files larger than 100 rows will be processed asynchronously
    size_to_defer = 100
    max_file_size = 4 * 1024 * 1024  # 4 MB upload cap
    handle_undo = False

    def __init__(self, **kwargs):
        # NOTE(review): block_id, max_points and display_name appear to be
        # supplied via kwargs and set by the superclass __init__ — confirm
        # against super_csv.CSVProcessor.
        self.now = time.time()
        self.max_points = 1
        self.display_name = ''
        super(ScoreCSVProcessor, self).__init__(**kwargs)
        # user_id -> 1; tracks users already handled so duplicate rows in
        # one upload are ignored (see preprocess_row).
        self.users_seen = {}

    def get_unique_path(self):
        # Per-block, timestamped path used to store deferred-processing state.
        return 'csv/state/{}/{}'.format(self.block_id, self.now)

    def validate_row(self, row):
        """
        Validate one uploaded row.

        A row is valid when it passes the parent-class checks, references
        this processor's block, and (if points are present) the points
        parse as a number no greater than max_points.
        """
        if not super(ScoreCSVProcessor, self).validate_row(row):
            return False
        if row['block_id'] != self.block_id:
            self.add_error(_('The CSV does not match this problem. Check that you uploaded the right CSV.'))
            return False
        if row['points']:
            try:
                if float(row['points']) > self.max_points:
                    self.add_error(_('Points must not be greater than {}.').format(self.max_points))
                    return False
            except ValueError:
                self.add_error(_('Points must be numbers.'))
                return False
        return True

    def preprocess_row(self, row):
        """
        Reduce an uploaded row to the dict needed by process_row.

        Rows with empty points, or for a user already seen in this file,
        return None implicitly and are skipped.
        """
        if row['points'] and row['user_id'] not in self.users_seen:
            to_save = {
                'user_id': row['user_id'],
                'block_id': self.block_id,
                'new_points': float(row['points']),
                'max_points': self.max_points
            }
            self.users_seen[row['user_id']] = 1
            return to_save

    def process_row(self, row):
        """
        Set the score for the given row, returning (status, undo)
        undo is a dict of an operation which would undo the set_score. In this case,
        that means we would have to call get_score, which could be expensive to do for the entire file.
        """
        if self.handle_undo:
            # get the current score, for undo. expensive
            undo = get_score(row['block_id'], row['user_id'])
            undo['new_points'] = undo['score']
            undo['max_points'] = row['max_points']
        else:
            undo = None
        set_score(row['block_id'], row['user_id'], row['new_points'], row['max_points'])
        return True, undo

    def _get_enrollments(self, course_id, **kwargs):  # pylint: disable=unused-argument
        """
        Return iterator of enrollments, as dicts.
        """
        enrollments = CourseEnrollment.objects.filter(
            course_id=course_id).select_related('programcourseenrollment')
        for enrollment in enrollments:
            enrollment_dict = {
                'user_id': enrollment.user.id,
                'username': enrollment.user.username,
                'full_name': enrollment.user.profile.name,
                'enrolled': enrollment.is_active,
                'track': enrollment.mode,
            }
            # Learners enrolled through a program carry an external key;
            # everyone else gets an empty student_uid.
            try:
                pce = enrollment.programcourseenrollment.program_enrollment
                enrollment_dict['student_uid'] = pce.external_user_key
            except ObjectDoesNotExist:
                enrollment_dict['student_uid'] = None
            yield enrollment_dict

    def get_rows_to_export(self):
        """
        Return iterator of rows for file export.
        """
        location = UsageKey.from_string(self.block_id)
        my_name = self.display_name
        students = get_scores(location)
        enrollments = self._get_enrollments(location.course_key)
        for enrollment in enrollments:
            row = {
                'block_id': location,
                'title': my_name,
                'points': None,
                'last_points': None,
                'date_last_graded': None,
                'who_last_graded': None,
            }
            row.update(enrollment)
            score = students.get(enrollment['user_id'], None)
            if score:
                # StudentModule stores grade as a fraction; scale back to points.
                row['last_points'] = int(score['grade'] * self.max_points)
                row['date_last_graded'] = score['modified']
            # TODO: figure out who last graded
            yield row
def are_grades_frozen(course_key): def are_grades_frozen(course_key):
""" Returns whether grades are frozen for the given course. """ """ Returns whether grades are frozen for the given course. """
if waffle_flags()[ENFORCE_FREEZE_GRADE_AFTER_COURSE_END].is_enabled(course_key): if waffle_flags()[ENFORCE_FREEZE_GRADE_AFTER_COURSE_END].is_enabled(course_key):
...@@ -148,67 +24,3 @@ def are_grades_frozen(course_key): ...@@ -148,67 +24,3 @@ def are_grades_frozen(course_key):
now = timezone.now() now = timezone.now()
return now > freeze_grade_date return now > freeze_grade_date
return False return False
def set_score(usage_key, student_id, score, max_points, **defaults):
    """
    Set a score for a block.

    Creates or updates the StudentModule row for (student, course, block),
    storing the grade as a fraction of max_points.

    Arguments:
        usage_key: UsageKey instance or block id string
        student_id: id of the user being scored
        score: earned points
        max_points: maximum possible points; 0/None is treated as 1.0
        **defaults: additional StudentModule fields to set
    """
    if not isinstance(usage_key, UsageKey):
        usage_key = UsageKey.from_string(usage_key)
    defaults['module_type'] = 'problem'
    # Force true division: without `from __future__ import division`,
    # two int arguments would floor-divide under Python 2 and silently
    # store a grade of 0 for any partial credit (e.g. 3/4 -> 0).
    defaults['grade'] = score / float(max_points or 1.0)
    defaults['max_grade'] = max_points
    StudentModule.objects.update_or_create(
        student_id=student_id,
        course_id=usage_key.course_key,
        module_state_key=usage_key,
        defaults=defaults)
def get_score(usage_key, user_id):
    """
    Return score for user_id and usage_key.

    Returns a dict with grade, score, max_grade, created and modified,
    or None when the learner has no StudentModule row for the block.
    """
    if not isinstance(usage_key, UsageKey):
        usage_key = UsageKey.from_string(usage_key)
    try:
        module = StudentModule.objects.get(
            course_id=usage_key.course_key,
            module_state_key=usage_key,
            student_id=user_id
        )
    except StudentModule.DoesNotExist:
        return None
    # Scale the stored fractional grade back up to earned points.
    return {
        'grade': module.grade,
        'score': module.grade * (module.max_grade or 1),
        'max_grade': module.max_grade,
        'created': module.created,
        'modified': module.modified
    }
def get_scores(usage_key, user_ids=None):
    """
    Return dictionary of student_id: scores.

    Covers every StudentModule row for the block, optionally limited to
    the given user_ids.
    """
    modules = StudentModule.objects.filter(
        course_id=usage_key.course_key,
        module_state_key=usage_key,
    )
    if user_ids:
        modules = modules.filter(student_id__in=user_ids)
    # Scale each stored fractional grade back up to earned points.
    return {
        module.student_id: {
            'grade': module.grade,
            'score': module.grade * (module.max_grade or 1),
            'max_grade': module.max_grade,
            'created': module.created,
            'modified': module.modified,
            'state': module.state,
        }
        for module in modules
    }
...@@ -13,27 +13,3 @@ class GradesUtilService(object): ...@@ -13,27 +13,3 @@ class GradesUtilService(object):
def are_grades_frozen(self): def are_grades_frozen(self):
"Check if grades are frozen for given course key" "Check if grades are frozen for given course key"
return grade_utils.are_grades_frozen(self.course_id) return grade_utils.are_grades_frozen(self.course_id)
    def get_score(self, usage_key, user_id):
        """
        Return score for user_id and usage_key.

        Thin delegation to grade_utils.get_score; returns a score dict,
        or None when the learner has no saved score for the block.
        """
        return grade_utils.get_score(usage_key, user_id)
    def get_scores(self, usage_key, user_ids=None):
        """
        Return dictionary of student_id: scores.

        Thin delegation to grade_utils.get_scores; user_ids optionally
        restricts the result to those students.
        """
        return grade_utils.get_scores(usage_key, user_ids)
    def set_score(self, usage_key, student_id, score, max_points, **defaults):
        """
        Set a score.

        Thin delegation to grade_utils.set_score, which creates or updates
        the learner's StudentModule row for the block.
        """
        return grade_utils.set_score(usage_key, student_id, score, max_points, **defaults)
    def get_score_processor(self, **kwargs):
        """
        Return a csv score processor.

        Thin delegation to grade_utils.ScoreCSVProcessor; kwargs are
        passed straight through to its constructor.
        """
        return grade_utils.ScoreCSVProcessor(**kwargs)
...@@ -320,7 +320,7 @@ def migrate_cohort_settings(course): ...@@ -320,7 +320,7 @@ def migrate_cohort_settings(course):
return cohort_settings return cohort_settings
def get_course_cohorts(course, assignment_type=None): def get_course_cohorts(course=None, course_id=None, assignment_type=None):
""" """
Get a list of all the cohorts in the given course. This will include auto cohorts, Get a list of all the cohorts in the given course. This will include auto cohorts,
regardless of whether or not the auto cohorts include any users. regardless of whether or not the auto cohorts include any users.
...@@ -333,11 +333,14 @@ def get_course_cohorts(course, assignment_type=None): ...@@ -333,11 +333,14 @@ def get_course_cohorts(course, assignment_type=None):
A list of CourseUserGroup objects. Empty if there are no cohorts. Does A list of CourseUserGroup objects. Empty if there are no cohorts. Does
not check whether the course is cohorted. not check whether the course is cohorted.
""" """
assert bool(course) ^ bool(course_id), "course or course_id required"
# Migrate cohort settings for this course # Migrate cohort settings for this course
migrate_cohort_settings(course) if course:
migrate_cohort_settings(course)
course_id = course.location.course_key
query_set = CourseUserGroup.objects.filter( query_set = CourseUserGroup.objects.filter(
course_id=course.location.course_key, course_id=course_id,
group_type=CourseUserGroup.COHORT group_type=CourseUserGroup.COHORT
) )
query_set = query_set.filter(cohort__assignment_type=assignment_type) if assignment_type else query_set query_set = query_set.filter(cohort__assignment_type=assignment_type) if assignment_type else query_set
......
...@@ -45,7 +45,7 @@ def move_to_verified_cohort(sender, instance, **kwargs): # pylint: disable=unus ...@@ -45,7 +45,7 @@ def move_to_verified_cohort(sender, instance, **kwargs): # pylint: disable=unus
log.error(u"Automatic verified cohorting enabled for course '%s', but course is not cohorted.", course_key) log.error(u"Automatic verified cohorting enabled for course '%s', but course is not cohorted.", course_key)
else: else:
course = get_course_by_id(course_key) course = get_course_by_id(course_key)
existing_manual_cohorts = get_course_cohorts(course, CourseCohort.MANUAL) existing_manual_cohorts = get_course_cohorts(course, assignment_type=CourseCohort.MANUAL)
if any(cohort.name == verified_cohort_name for cohort in existing_manual_cohorts): if any(cohort.name == verified_cohort_name for cohort in existing_manual_cohorts):
# Get a random cohort to use as the default cohort (for audit learners). # Get a random cohort to use as the default cohort (for audit learners).
# Note that calling this method will create a "Default Group" random cohort if no random # Note that calling this method will create a "Default Group" random cohort if no random
......
...@@ -226,7 +226,6 @@ sortedcontainers==2.1.0 ...@@ -226,7 +226,6 @@ sortedcontainers==2.1.0
soupsieve==1.9.1 # via beautifulsoup4 soupsieve==1.9.1 # via beautifulsoup4
sqlparse==0.3.0 sqlparse==0.3.0
stevedore==1.30.1 stevedore==1.30.1
git+https://github.com/davestgermain/super-csv@4963bf5da5489f06369da9cce84c92e3e42fe3d2#egg=super-csv==0.1.0
sympy==1.4 sympy==1.4
tincan==0.0.5 # via edx-enterprise tincan==0.0.5 # via edx-enterprise
unicodecsv==0.14.1 unicodecsv==0.14.1
......
...@@ -299,7 +299,6 @@ sphinx==1.8.5 ...@@ -299,7 +299,6 @@ sphinx==1.8.5
sphinxcontrib-websupport==1.1.2 # via sphinx sphinxcontrib-websupport==1.1.2 # via sphinx
sqlparse==0.3.0 sqlparse==0.3.0
stevedore==1.30.1 stevedore==1.30.1
git+https://github.com/davestgermain/super-csv@4963bf5da5489f06369da9cce84c92e3e42fe3d2#egg=super-csv==0.1.0
sympy==1.4 sympy==1.4
testfixtures==6.8.2 testfixtures==6.8.2
text-unidecode==1.2 text-unidecode==1.2
......
...@@ -87,7 +87,6 @@ git+https://github.com/edx/crowdsourcehinter.git@518605f0a95190949fe77bd39158450 ...@@ -87,7 +87,6 @@ git+https://github.com/edx/crowdsourcehinter.git@518605f0a95190949fe77bd39158450
-e git+https://github.com/edx/DoneXBlock.git@01a14f3bd80ae47dd08cdbbe2f88f3eb88d00fba#egg=done-xblock -e git+https://github.com/edx/DoneXBlock.git@01a14f3bd80ae47dd08cdbbe2f88f3eb88d00fba#egg=done-xblock
-e git+https://github.com/edx-solutions/xblock-google-drive.git@138e6fa0bf3a2013e904a085b9fed77dab7f3f21#egg=xblock-google-drive -e git+https://github.com/edx-solutions/xblock-google-drive.git@138e6fa0bf3a2013e904a085b9fed77dab7f3f21#egg=xblock-google-drive
git+https://github.com/edx/xblock-lti-consumer.git@v1.1.8#egg=lti_consumer-xblock==1.1.8 git+https://github.com/edx/xblock-lti-consumer.git@v1.1.8#egg=lti_consumer-xblock==1.1.8
git+https://github.com/davestgermain/super-csv@4963bf5da5489f06369da9cce84c92e3e42fe3d2#egg=super-csv==0.1.0
# Third Party XBlocks # Third Party XBlocks
......
...@@ -286,7 +286,6 @@ sortedcontainers==2.1.0 ...@@ -286,7 +286,6 @@ sortedcontainers==2.1.0
soupsieve==1.9.1 soupsieve==1.9.1
sqlparse==0.3.0 sqlparse==0.3.0
stevedore==1.30.1 stevedore==1.30.1
git+https://github.com/davestgermain/super-csv@4963bf5da5489f06369da9cce84c92e3e42fe3d2#egg=super-csv==0.1.0
sympy==1.4 sympy==1.4
testfixtures==6.8.2 testfixtures==6.8.2
text-unidecode==1.2 # via faker text-unidecode==1.2 # via faker
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment