diff --git a/common/lib/capa/capa/responsetypes.py b/common/lib/capa/capa/responsetypes.py
index cd2b81fc1cb422db97442ff2bad78f467fc9b7f1..86f99f26747ba41b90ba81741cc696f2f2833305 100644
--- a/common/lib/capa/capa/responsetypes.py
+++ b/common/lib/capa/capa/responsetypes.py
@@ -48,6 +48,7 @@ import capa.safe_exec as safe_exec
 import capa.xqueue_interface as xqueue_interface
 from openedx.core.djangolib.markup import HTML, Text
 from openedx.core.lib import edx_six
+from openedx.core.lib.grade_utils import round_away_from_zero
 
 from . import correctmap
 from .registry import TagRegistry
@@ -727,7 +728,7 @@ class ChoiceResponse(LoncapaResponse):
         good_non_answers = sum([1 for blank in student_non_answers if blank in self.incorrect_choices])
         edc_current_grade = good_answers + good_non_answers
 
-        return_grade = round(self.get_max_score() * float(edc_current_grade) / float(edc_max_grade), 2)
+        return_grade = round_away_from_zero(self.get_max_score() * float(edc_current_grade) / float(edc_max_grade), 2)
 
         if edc_current_grade == edc_max_grade:
             return CorrectMap(self.answer_id, correctness='correct')
@@ -764,10 +765,10 @@ class ChoiceResponse(LoncapaResponse):
             return_grade = self.get_max_score()
             return CorrectMap(self.answer_id, correctness='correct', npoints=return_grade)
         elif halves_error_count == 1 and len(all_choices) > 2:
-            return_grade = round(self.get_max_score() / 2.0, 2)
+            return_grade = round_away_from_zero(self.get_max_score() / 2.0, 2)
             return CorrectMap(self.answer_id, correctness='partially-correct', npoints=return_grade)
         elif halves_error_count == 2 and len(all_choices) > 4:
-            return_grade = round(self.get_max_score() / 4.0, 2)
+            return_grade = round_away_from_zero(self.get_max_score() / 4.0, 2)
             return CorrectMap(self.answer_id, correctness='partially-correct', npoints=return_grade)
         else:
             return CorrectMap(self.answer_id, 'incorrect')
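
A note on why these `ChoiceResponse` call sites change: under Python 3, the builtin `round()` uses banker's rounding (ties go to the even digit), so partial-credit values that previously rounded up can silently drop. A minimal sketch under Python 3, using 0.125 as a hypothetical earned fraction because it is exactly representable in binary and therefore a true halfway case at two decimal places:

```python
from openedx.core.lib.grade_utils import round_away_from_zero

print(round(0.125, 2))                 # 0.12 -- Python 3 banker's rounding (ties to even)
print(round_away_from_zero(0.125, 2))  # 0.13 -- the value Python 2's round() produced
```
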
diff --git a/common/lib/xmodule/xmodule/lti_2_util.py b/common/lib/xmodule/xmodule/lti_2_util.py
index 0e15fd1f8be94e710e7bfed125ce63293d96c924..d32f7a41f951d830e667abdf758f2fa5a968a6b9 100644
--- a/common/lib/xmodule/xmodule/lti_2_util.py
+++ b/common/lib/xmodule/xmodule/lti_2_util.py
@@ -18,6 +18,7 @@ from oauthlib.oauth1 import Client
 from six import text_type
 from webob import Response
 from xblock.core import XBlock
+from openedx.core.lib.grade_utils import round_away_from_zero
 
 log = logging.getLogger(__name__)
 
@@ -177,7 +178,7 @@ class LTI20ModuleMixin(object):
             return Response(json.dumps(base_json_obj).encode('utf-8'), content_type=LTI_2_0_JSON_CONTENT_TYPE)
 
         # Fall through to returning grade and comment
-        base_json_obj['resultScore'] = round(self.module_score, 2)
+        base_json_obj['resultScore'] = round_away_from_zero(self.module_score, 2)
         base_json_obj['comment'] = self.score_comment
         return Response(json.dumps(base_json_obj).encode('utf-8'), content_type=LTI_2_0_JSON_CONTENT_TYPE)
 
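
The LTI 2.0 `resultScore` is a fraction between 0 and 1 reported back to the tool consumer, so the same tie-breaking difference would change outcomes visible outside the platform. A hedged illustration with a hypothetical `module_score` of 0.625 (5/8, exactly representable in binary):

```python
from openedx.core.lib.grade_utils import round_away_from_zero

module_score = 0.625
print(round(module_score, 2))                 # 0.62 under Python 3's ties-to-even
print(round_away_from_zero(module_score, 2))  # 0.63, what consumers saw under Python 2
```
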
diff --git a/lms/djangoapps/class_dashboard/dashboard_data.py b/lms/djangoapps/class_dashboard/dashboard_data.py
index a53007b2cc79d4292c9910b5564499d0b2c3b0ba..038625b2662656f9c3faf2bb2413c0beec041713 100644
--- a/lms/djangoapps/class_dashboard/dashboard_data.py
+++ b/lms/djangoapps/class_dashboard/dashboard_data.py
@@ -16,6 +16,7 @@ from lms.djangoapps.instructor_analytics.csvs import create_csv_response
 from util.json_request import JsonResponse
 from xmodule.modulestore.django import modulestore
 from xmodule.modulestore.inheritance import own_metadata
+from openedx.core.lib.grade_utils import round_away_from_zero
 
 # Used to limit the length of list displayed to the screen.
 MAX_SCREEN_LIST_LENGTH = 250
@@ -193,7 +194,7 @@ def get_d3_problem_grade_distrib(course_id):
                             for (grade, count_grade) in problem_info['grade_distrib']:
                                 percent = 0.0
                                 if max_grade > 0:
-                                    percent = round((grade * 100.0) / max_grade, 1)
+                                    percent = round_away_from_zero((grade * 100.0) / max_grade, 1)
 
                                 # Compute percent of students with this grade
                                 student_count_percent = 0
@@ -352,7 +353,7 @@ def get_d3_section_grade_distrib(course_id, section):
             for (grade, count_grade) in grade_distrib[problem]['grade_distrib']:
                 percent = 0.0
                 if max_grade > 0:
-                    percent = round((grade * 100.0) / max_grade, 1)
+                    percent = round_away_from_zero((grade * 100.0) / max_grade, 1)
 
                # Construct tooltip for problem in grade distribution view
                 tooltip = {
@@ -513,7 +514,7 @@ def get_students_problem_grades(request, csv=False):
 
             student_dict['percent'] = 0
             if student['max_grade'] > 0:
-                student_dict['percent'] = round(student['grade'] * 100 / student['max_grade'])
+                student_dict['percent'] = round_away_from_zero(student['grade'] * 100 / student['max_grade'])
             results.append(student_dict)
 
         max_exceeded = False
@@ -535,7 +536,7 @@ def get_students_problem_grades(request, csv=False):
         for student in students:
             percent = 0
             if student['max_grade'] > 0:
-                percent = round(decimal.Decimal(student['grade'] * 100 / student['max_grade']), 1)
+                percent = round_away_from_zero(student['grade'] * 100 / student['max_grade'], 1)
             results.append([student['student__profile__name'], student['student__username'], student['grade'], percent])
 
         response = create_csv_response(filename, header, results)
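
Two things happen in this file: the one-decimal dashboard percentages switch to the new helper, and the CSV branch stops wrapping the value in `decimal.Decimal`, since `round_away_from_zero` operates on plain floats. A sketch with hypothetical counts chosen so the percentage lands exactly on a tie (89 of 400 points is exactly 22.25%):

```python
from openedx.core.lib.grade_utils import round_away_from_zero

percent = (89 * 100.0) / 400             # 22.25, exactly representable in binary
print(round(percent, 1))                 # 22.2 under Python 3 (ties to even)
print(round_away_from_zero(percent, 1))  # 22.3, matching the old dashboard output
```
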
diff --git a/lms/djangoapps/grades/course_grade.py b/lms/djangoapps/grades/course_grade.py
index dcc5bf363b7b8d899a2c067c105c7b00025c9a3e..fb8181d7daf0d381b97e1d9d8d4a0849c48626d2 100644
--- a/lms/djangoapps/grades/course_grade.py
+++ b/lms/djangoapps/grades/course_grade.py
@@ -12,6 +12,7 @@ from django.conf import settings
 from django.utils.encoding import python_2_unicode_compatible
 from lazy import lazy
 
+from openedx.core.lib.grade_utils import round_away_from_zero
 from xmodule import block_metadata_utils
 
 from .config import assume_zero_if_absent
@@ -296,7 +297,9 @@ class CourseGrade(CourseGradeBase):
         Computes and returns the grade percentage from the given
         result from the grader.
         """
-        return round(grader_result['percent'] * 100 + 0.05) / 100
+
+        # Confused about the addition of .05 here?  See https://openedx.atlassian.net/browse/TNL-6972
+        return round_away_from_zero(grader_result['percent'] * 100 + 0.05) / 100
 
     @staticmethod
     def _compute_letter_grade(grade_cutoffs, percent):
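
The 0.05 added before rounding nudges the raw percentage up by half of the displayed 0.01 unit, per the TNL-6972 ticket linked in the comment. A worked example with an illustrative raw grader result:

```python
from openedx.core.lib.grade_utils import round_away_from_zero

raw = 0.4946                                         # hypothetical grader output
print(round_away_from_zero(raw * 100 + 0.05) / 100)  # 0.5  -- nudged over the boundary
print(round_away_from_zero(raw * 100) / 100)         # 0.49 -- without the nudge
```
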
diff --git a/lms/djangoapps/instructor/tests/test_spoc_gradebook.py b/lms/djangoapps/instructor/tests/test_spoc_gradebook.py
index 078d753c6664066c9bed7b68463633a619947c36..1fa34bbf04cae1538b3270f956958ee7eb641eac 100644
--- a/lms/djangoapps/instructor/tests/test_spoc_gradebook.py
+++ b/lms/djangoapps/instructor/tests/test_spoc_gradebook.py
@@ -104,11 +104,11 @@ class TestDefaultGradingPolicy(TestGradebook):
         # Users 1-10 attempted any homework (and get Fs) [10]
         # Users 4-10 scored enough to not get rounded to 0 for the class (and get Fs) [7]
         # One use at top of the page [1]
-        self.assertEqual(22, self.response.content.count(b'grade_F'))
+        self.assertEqual(23, self.response.content.count(b'grade_F'))
 
         # All other grades are None [29 categories * 11 users - 27 non-empty grades = 292]
         # One use at the top of the page [1]
-        self.assertEqual(293, self.response.content.count(b'grade_None'))
+        self.assertEqual(292, self.response.content.count(b'grade_None'))
 
 
 class TestLetterCutoffPolicy(TestGradebook):
diff --git a/openedx/core/lib/grade_utils.py b/openedx/core/lib/grade_utils.py
index 601b994d133709863e6cc3e2eadd796599a5a464..2e7073e962dcf1406382f85d77cd1aa553abb7a8 100644
--- a/openedx/core/lib/grade_utils.py
+++ b/openedx/core/lib/grade_utils.py
@@ -1,6 +1,7 @@
 """
 Helper functions for grades and scores.
 """
+import math
 
 
 def compare_scores(earned1, possible1, earned2, possible2, treat_undefined_as_zero=False):
@@ -42,3 +43,26 @@ def is_score_higher_or_equal(earned1, possible1, earned2, possible2, treat_undef
     """
     is_higher_or_equal, _, _ = compare_scores(earned1, possible1, earned2, possible2, treat_undefined_as_zero)
     return is_higher_or_equal
+
+
+def round_away_from_zero(number, digits=0):
+    """
+    Round numbers using the 'away from zero' strategy rather than the
+    'banker's rounding' strategy.  The strategy refers to how we round when
+    a number is halfway between two others, e.g. 0.5, 1.5, etc.  In Python 2,
+    positive numbers in this category were rounded up and negative numbers
+    were rounded down, i.e. away from zero.  In Python 3, numbers round
+    toward even, so 0.5 rounds to 0 but 1.5 rounds to 2.
+
+    See here for more on floating point rounding strategies:
+    https://en.wikipedia.org/wiki/IEEE_754#Rounding_rules
+
+    We want to continue to round away from zero so that student grades remain
+    consistent and don't suddenly change.
+    """
+    p = 10.0 ** digits
+
+    if number >= 0:
+        return float(math.floor((number * p) + 0.5)) / p
+    else:
+        return float(math.ceil((number * p) - 0.5)) / p
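
The helper's scale-shift-floor arithmetic reproduces Python 2's behavior at every halfway point, including negative inputs. A quick behavioral comparison under Python 3:

```python
from openedx.core.lib.grade_utils import round_away_from_zero

print(round(0.5), round(1.5), round(2.5))  # 0 2 2  -- builtin ties-to-even
print(round_away_from_zero(0.5),
      round_away_from_zero(1.5),
      round_away_from_zero(2.5))           # 1.0 2.0 3.0 -- always away from zero
print(round_away_from_zero(-2.5))          # -3.0, not -2.0
```
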
diff --git a/openedx/core/lib/tests/test_grade_utils.py b/openedx/core/lib/tests/test_grade_utils.py
index 9c4f08216d993e9aa545307ff082778de32081aa..4d6dd00912834f6fe6475e110515c14f9df144e5 100644
--- a/openedx/core/lib/tests/test_grade_utils.py
+++ b/openedx/core/lib/tests/test_grade_utils.py
@@ -7,7 +7,7 @@ from unittest import TestCase
 
 import ddt
 
-from ..grade_utils import compare_scores
+from ..grade_utils import compare_scores, round_away_from_zero
 
 
 @ddt.ddt
@@ -45,3 +45,15 @@ class TestGradeUtils(TestCase):
         assert is_higher is True
         assert 0 == percentage_1
         assert 0 == percentage_2
+
+    @ddt.data(
+        (0.5, 1.0),
+        (1.45, 1.5, 1),
+        (-0.5, -1.0),
+        (-0.1, -0.0),
+        (0.1, 0.0),
+        (0.0, 0.0)
+    )
+    @ddt.unpack
+    def test_round_away_from_zero(self, precise, expected_rounded_number, rounding_precision=0):
+        assert round_away_from_zero(precise, rounding_precision) == expected_rounded_number
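
Note the mixed tuple arity in the `ddt` data: two-element rows fall back to the `rounding_precision=0` default, while `(1.45, 1.5, 1)` exercises `digits=1`. That row works because `1.45 * 10` happens to land exactly on `14.5` in floating point, a true tie the helper resolves upward:

```python
from openedx.core.lib.grade_utils import round_away_from_zero

assert 1.45 * 10 == 14.5                     # the product rounds onto the exact tie
assert round_away_from_zero(1.45, 1) == 1.5  # resolved away from zero
assert round(1.45, 1) == 1.4                 # the builtin sees the stored double, just below the tie
```
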