diff --git a/common/lib/xmodule/xmodule/js/src/peergrading/peer_grading.coffee b/common/lib/xmodule/xmodule/js/src/peergrading/peer_grading.coffee
new file mode 100644
index 0000000000000000000000000000000000000000..ed79ba9c717afd03a761c3df851905a8d9d03e33
--- /dev/null
+++ b/common/lib/xmodule/xmodule/js/src/peergrading/peer_grading.coffee
@@ -0,0 +1,27 @@
+# This is a simple class that just hides the error container
+# and message container when they are empty
+# Can (and should) be expanded upon when our problem list
+# becomes more sophisticated
+class PeerGrading
+  constructor: () ->
+    @error_container = $('.error-container')
+    @error_container.toggle(not @error_container.is(':empty'))
+
+    @message_container = $('.message-container')
+    @message_container.toggle(not @message_container.is(':empty'))
+  
+    @problem_list = $('.problem-list')
+    @construct_progress_bar()
+
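+  # Builds a jQuery UI progressbar for each problem row.  A sketch of the
+  # markup each row is assumed to carry (the data attributes are read below,
+  # and .progress-bar is the element that receives the widget):
+  #   <tr data-graded="3" data-required="5"> ... <div class="progress-bar"></div> ... </tr>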
+  construct_progress_bar: () =>
+    problems = @problem_list.find('tr').next()
+    problems.each( (index, element) =>
+      problem = $(element)
+      progress_bar = problem.find('.progress-bar')
+      bar_value = parseInt(problem.data('graded'))
+      bar_max = parseInt(problem.data('required')) + bar_value
+      progress_bar.progressbar({value: bar_value, max: bar_max})
+    )
+    
+
+$(document).ready(() -> new PeerGrading())
diff --git a/common/lib/xmodule/xmodule/js/src/peergrading/peer_grading_problem.coffee b/common/lib/xmodule/xmodule/js/src/peergrading/peer_grading_problem.coffee
new file mode 100644
index 0000000000000000000000000000000000000000..ab16b34d12c26bf6e6210f05ee743d5fe9c3f56f
--- /dev/null
+++ b/common/lib/xmodule/xmodule/js/src/peergrading/peer_grading_problem.coffee
@@ -0,0 +1,478 @@
+##################################
+#
+#  This is the JS that renders the peer grading problem page.
+#  Fetches the correct problem and/or calibration essay
+#  and sends back the grades
+#
+#  Should not be run when we don't have a location to send back
+#  to the server
+#
+#  PeerGradingProblemBackend - 
+#   makes all the ajax requests and provides a mock interface
+#   for testing purposes
+#
+#  PeerGradingProblem -
+#   handles the rendering and user interactions with the interface
+#
+##################################
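+
+# A minimal wiring sketch (this mirrors the bootstrapping at the bottom of
+# this file; passing mock_backend = true makes the backend return canned
+# responses instead of hitting the server):
+#
+#   backend = new PeerGradingProblemBackend(ajax_url, true)
+#   new PeerGradingProblem(backend)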
+class PeerGradingProblemBackend
+  constructor: (ajax_url, mock_backend) ->
+    @mock_backend = mock_backend
+    @ajax_url = ajax_url
+    @mock_cnt = 0
+
+  post: (cmd, data, callback) ->
+    if @mock_backend
+      callback(@mock(cmd, data))
+    else
+      # if this post request fails, the error callback will catch it
+      $.post(@ajax_url + cmd, data, callback)
+        .error => callback({success: false, error: "Error occurred while performing this operation"})
+
+  mock: (cmd, data) ->
+    if cmd == 'is_student_calibrated'
+      # change to test each version
+      response = 
+        success: true 
+        calibrated: @mock_cnt >= 2
+    else if cmd == 'show_calibration_essay'
+      #response = 
+      #  success: false
+      #  error: "There was an error"
+      @mock_cnt++
+      response = 
+        success: true
+        submission_id: 1
+        submission_key: 'abcd'
+        student_response: '''
+            Contrary to popular belief, Lorem Ipsum is not simply random text. It has roots in a piece of classical Latin literature from 45 BC, making it over 2000 years old. Richard McClintock, a Latin professor at Hampden-Sydney College in Virginia, looked up one of the more obscure Latin words, consectetur, from a Lorem Ipsum passage, and going through the cites of the word in classical literature, discovered the undoubtable source. Lorem Ipsum comes from sections 1.10.32 and 1.10.33 of "de Finibus Bonorum et Malorum" (The Extremes of Good and Evil) by Cicero, written in 45 BC. This book is a treatise on the theory of ethics, very popular during the Renaissance. The first line of Lorem Ipsum, "Lorem ipsum dolor sit amet..", comes from a line in section 1.10.32.
+
+The standard chunk of Lorem Ipsum used since the 1500s is reproduced below for those interested. Sections 1.10.32 and 1.10.33 from "de Finibus Bonorum et Malorum" by Cicero are also reproduced in their exact original form, accompanied by English versions from the 1914 translation by H. Rackham.
+            '''
+        prompt: '''
+            	<h2>S11E3: Metal Bands</h2>
+<p>Shown below are schematic band diagrams for two different metals. Both diagrams appear different, yet both of the elements are indisputably metallic in nature.</p>
+<p>* Why is it that both sodium and magnesium behave as metals, even though the s-band of magnesium is filled? </p>
+<p>This is a self-assessed open response question. Please use as much space as you need in the box below to answer the question.</p>
+            '''
+        rubric: '''
+<table class="rubric"><tbody><tr><th>Purpose</th>
+                
+            <td>
+                    <input type="radio" class="score-selection" name="score-selection-0" id="score-0-0" value="0"><label for="score-0-0">No product</label>
+            </td>
+                
+            <td>
+                    <input type="radio" class="score-selection" name="score-selection-0" id="score-0-1" value="1"><label for="score-0-1">Unclear purpose or main idea</label>
+            </td>
+                
+            <td>
+                    <input type="radio" class="score-selection" name="score-selection-0" id="score-0-2" value="2"><label for="score-0-2">Communicates an identifiable purpose and/or main idea for an audience</label>
+            </td>
+                
+            <td>
+                    <input type="radio" class="score-selection" name="score-selection-0" id="score-0-3" value="3"><label for="score-0-3">Achieves a clear and distinct purpose for a targeted audience and communicates main ideas with effectively used techniques to introduce and represent ideas and insights</label>
+            </td>
+        </tr><tr><th>Organization</th>
+                
+            <td>
+                    <input type="radio" class="score-selection" name="score-selection-1" id="score-1-0" value="0"><label for="score-1-0">No product</label>
+            </td>
+                
+            <td>
+                    <input type="radio" class="score-selection" name="score-selection-1" id="score-1-1" value="1"><label for="score-1-1">Organization is unclear; introduction, body, and/or conclusion are underdeveloped, missing or confusing.</label>
+            </td>
+                
+            <td>
+                    <input type="radio" class="score-selection" name="score-selection-1" id="score-1-2" value="2"><label for="score-1-2">Organization is occasionally unclear; introduction, body or conclusion may be underdeveloped.</label>
+            </td>
+                
+            <td>
+                    <input type="radio" class="score-selection" name="score-selection-1" id="score-1-3" value="3"><label for="score-1-3">Organization is clear and easy to follow; introduction, body and conclusion are defined and aligned with purpose.</label>
+            </td>
+        </tr></tbody></table>
+            '''
+        max_score: 4
+    else if cmd == 'get_next_submission'
+      response = 
+        success: true
+        submission_id: 1
+        submission_key: 'abcd'
+        student_response: '''Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed nec tristique ante. Proin at mauris sapien, quis varius leo. Morbi laoreet leo nisi. Morbi aliquam lacus ante. Cras iaculis velit sed diam mattis a fermentum urna luctus. Duis consectetur nunc vitae felis facilisis eget vulputate risus viverra. Cras consectetur ullamcorper lobortis. Nam eu gravida lorem. Nulla facilisi. Nullam quis felis enim. Mauris orci lectus, dictum id cursus in, vulputate in massa.
+
+Phasellus non varius sem. Nullam commodo lacinia odio sit amet egestas. Donec ullamcorper sapien sagittis arcu volutpat placerat. Phasellus ut pretium ante. Nam dictum pulvinar nibh dapibus tristique. Sed at tellus mi, fringilla convallis justo. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Phasellus tristique rutrum nulla sed eleifend. Praesent at nunc arcu. Mauris condimentum faucibus nibh, eget commodo quam viverra sed. Morbi in tincidunt dolor. Morbi sed augue et augue interdum fermentum.
+
+Curabitur tristique purus ac arcu consequat cursus. Cras diam felis, dignissim quis placerat at, aliquet ac metus. Mauris vulputate est eu nibh imperdiet varius. Cras aliquet rhoncus elit a laoreet. Mauris consectetur erat et erat scelerisque eu faucibus dolor consequat. Nam adipiscing sagittis nisl, eu mollis massa tempor ac. Nulla scelerisque tempus blandit. Phasellus ac ipsum eros, id posuere arcu. Nullam non sapien arcu. Vivamus sit amet lorem justo, ac tempus turpis. Suspendisse pharetra gravida imperdiet. Pellentesque lacinia mi eu elit luctus pellentesque. Sed accumsan libero a magna elementum varius. Nunc eget pellentesque metus. '''
+        prompt: '''
+            	<h2>S11E3: Metal Bands</h2>
+<p>Shown below are schematic band diagrams for two different metals. Both diagrams appear different, yet both of the elements are indisputably metallic in nature.</p>
+<p>* Why is it that both sodium and magnesium behave as metals, even though the s-band of magnesium is filled? </p>
+<p>This is a self-assessed open response question. Please use as much space as you need in the box below to answer the question.</p>
+            '''
+        rubric: '''
+<table class="rubric"><tbody><tr><th>Purpose</th>
+                
+            <td>
+                    <input type="radio" class="score-selection" name="score-selection-0" id="score-0-0" value="0"><label for="score-0-0">No product</label>
+            </td>
+                
+            <td>
+                    <input type="radio" class="score-selection" name="score-selection-0" id="score-0-1" value="1"><label for="score-0-1">Unclear purpose or main idea</label>
+            </td>
+                
+            <td>
+                    <input type="radio" class="score-selection" name="score-selection-0" id="score-0-2" value="2"><label for="score-0-2">Communicates an identifiable purpose and/or main idea for an audience</label>
+            </td>
+                
+            <td>
+                    <input type="radio" class="score-selection" name="score-selection-0" id="score-0-3" value="3"><label for="score-0-3">Achieves a clear and distinct purpose for a targeted audience and communicates main ideas with effectively used techniques to introduce and represent ideas and insights</label>
+            </td>
+        </tr><tr><th>Organization</th>
+                
+            <td>
+                    <input type="radio" class="score-selection" name="score-selection-1" id="score-1-0" value="0"><label for="score-1-0">No product</label>
+            </td>
+                
+            <td>
+                    <input type="radio" class="score-selection" name="score-selection-1" id="score-1-1" value="1"><label for="score-1-1">Organization is unclear; introduction, body, and/or conclusion are underdeveloped, missing or confusing.</label>
+            </td>
+                
+            <td>
+                    <input type="radio" class="score-selection" name="score-selection-1" id="score-1-2" value="2"><label for="score-1-2">Organization is occasionally unclear; introduction, body or conclusion may be underdeveloped.</label>
+            </td>
+                
+            <td>
+                    <input type="radio" class="score-selection" name="score-selection-1" id="score-1-3" value="3"><label for="score-1-3">Organization is clear and easy to follow; introduction, body and conclusion are defined and aligned with purpose.</label>
+            </td>
+        </tr></tbody></table>
+            '''
+        max_score: 4
+    else if cmd == 'save_calibration_essay'
+      response = 
+        success: true
+        actual_score: 2
+    else if cmd == 'save_grade'
+      response = 
+        success: true
+
+    return response
+
+
+class PeerGradingProblem
+  constructor: (backend) ->
+    @prompt_wrapper = $('.prompt-wrapper')
+    @backend = backend
+    
+
+    # get the location of the problem
+    @location = $('.peer-grading').data('location')
+    # prevent this code from trying to run
+    # when we don't have a location
+    if not @location
+      return
+
+    # get the other elements we want to fill in
+    @submission_container = $('.submission-container')
+    @prompt_container = $('.prompt-container')
+    @rubric_container = $('.rubric-container')
+    @flag_student_container = $('.flag-student-container')
+    @calibration_panel = $('.calibration-panel')
+    @grading_panel = $('.grading-panel')
+    @content_panel = $('.content-panel')
+    @grading_message = $('.grading-message')
+    @grading_message.hide()
+
+    @grading_wrapper = $('.grading-wrapper')
+    @calibration_feedback_panel = $('.calibration-feedback')
+    @interstitial_page = $('.interstitial-page')
+    @interstitial_page.hide()
+
+    @error_container = $('.error-container')
+
+    @submission_key_input = $("input[name='submission-key']")
+    @essay_id_input = $("input[name='essay-id']")
+    @feedback_area = $('.feedback-area')
+
+    @score_selection_container = $('.score-selection-container')
+    @rubric_selection_container = $('.rubric-selection-container')
+    @grade = null
+    @calibration = null
+
+    @submit_button = $('.submit-button')
+    @action_button = $('.action-button')
+    @calibration_feedback_button = $('.calibration-feedback-button')
+    @interstitial_page_button = $('.interstitial-page-button')
+    @flag_student_checkbox = $('.flag-checkbox')
+
+    Collapsible.setCollapsibles(@content_panel)
+
+    # Set up the click event handlers
+    @action_button.click -> history.back()
+    @calibration_feedback_button.click => 
+      @calibration_feedback_panel.hide()
+      @grading_wrapper.show()
+      @is_calibrated_check()
+
+    @interstitial_page_button.click =>
+      @interstitial_page.hide()
+      @is_calibrated_check()
+
+    @is_calibrated_check()
+
+
+  ##########
+  #
+  #  Ajax calls to the backend
+  #
+  ##########
+  is_calibrated_check: () =>
+    @backend.post('is_student_calibrated', {location: @location}, @calibration_check_callback)
+
+  fetch_calibration_essay: () =>
+    @backend.post('show_calibration_essay', {location: @location}, @render_calibration)
+
+  fetch_submission_essay: () =>
+    @backend.post('get_next_submission', {location: @location}, @render_submission)
+
+  # finds the scores for each rubric category
+  get_score_list: () =>
+    # find the number of categories:
+    num_categories = $('table.rubric tr').length
+
+    score_lst = []
+    # get the score for each one
+    for i in [0..(num_categories-1)]
+      score = $("input[name='score-selection-#{i}']:checked").val()
+      score_lst.push(score)
+
+    return score_lst
+
+  construct_data: () ->
+    data =
+      rubric_scores: @get_score_list()
+      score: @grade
+      location: @location
+      submission_id: @essay_id_input.val()
+      submission_key: @submission_key_input.val()
+      feedback: @feedback_area.val()
+      submission_flagged: @flag_student_checkbox.is(':checked')
+    return data
+
+
+  submit_calibration_essay: () =>
+    data = @construct_data()
+    @backend.post('save_calibration_essay', data, @calibration_callback)
+
+  submit_grade: () =>
+    data = @construct_data()
+    @backend.post('save_grade', data, @submission_callback)
+    
+
+  ##########
+  #
+  #  Callbacks for various events
+  #
+  ##########
+
+  # called after we perform an is_student_calibrated check
+  calibration_check_callback: (response) =>
+    if response.success
+      # if we haven't been calibrating before
+      if response.calibrated and (@calibration == null or @calibration == false)
+        @calibration = false
+        @fetch_submission_essay()
+      # If we were calibrating before and no longer need to,
+      # show the interstitial page
+      else if response.calibrated and @calibration == true
+        @calibration = false
+        @render_interstitial_page()
+      else
+        @calibration = true
+        @fetch_calibration_essay()
+    else if response.error
+      @render_error(response.error)
+    else
+      @render_error("Error contacting the grading service")
+
+
+  # called after we submit a calibration score
+  calibration_callback: (response) =>
+    if response.success
+      @render_calibration_feedback(response)
+    else if response.error
+      @render_error(response.error)
+    else 
+      @render_error("Error saving calibration score")
+
+  # called after we submit a submission score
+  submission_callback: (response) =>
+    if response.success
+      @is_calibrated_check()
+      @grading_message.fadeIn()
+      @grading_message.html("<p>Grade sent successfully.</p>")
+    else
+      if response.error
+        @render_error(response.error)
+      else
+        @render_error("Error occurred while submitting grade")
+
+  # called after a grade is selected on the interface
+  graded_callback: (event) =>
+    @grade = $("input[name='grade-selection']:checked").val()
+    if @grade == undefined
+      return
+    # check to see whether or not any categories have not been scored
+    num_categories = $('table.rubric tr').length
+    for i in [0..(num_categories-1)]
+      score = $("input[name='score-selection-#{i}']:checked").val()
+      if score == undefined
+        return
+    # show button if we have scores for all categories
+    @show_submit_button()
+
+  
+      
+  ##########
+  #
+  #  Rendering methods and helpers
+  #
+  ##########
+  # renders a calibration essay
+  render_calibration: (response) =>
+    if response.success
+
+      # load in all the data
+      @submission_container.html("<h3>Training Essay</h3>")
+      @render_submission_data(response)
+      # TODO: indicate that we're in calibration mode 
+      @calibration_panel.addClass('current-state')
+      @grading_panel.removeClass('current-state')
+
+      # Display the right text
+      # both versions of the text are written into the template itself
+      # we only need to show/hide the correct ones at the correct time
+      @calibration_panel.find('.calibration-text').show()
+      @grading_panel.find('.calibration-text').show()
+      @calibration_panel.find('.grading-text').hide()
+      @grading_panel.find('.grading-text').hide()
+      @flag_student_container.hide()
+
+      @submit_button.unbind('click')
+      @submit_button.click @submit_calibration_essay
+
+    else if response.error
+      @render_error(response.error)
+    else
+      @render_error("An error occurred while retrieving the next calibration essay")
+
+  # Renders a student submission to be graded
+  render_submission: (response) =>
+    if response.success
+      @submit_button.hide()
+      @submission_container.html("<h3>Submitted Essay</h3>")
+      @render_submission_data(response)
+
+      @calibration_panel.removeClass('current-state')
+      @grading_panel.addClass('current-state')
+
+      # Display the correct text
+      # both versions of the text are written into the template itself
+      # we only need to show/hide the correct ones at the correct time
+      @calibration_panel.find('.calibration-text').hide()
+      @grading_panel.find('.calibration-text').hide()
+      @calibration_panel.find('.grading-text').show()
+      @grading_panel.find('.grading-text').show()
+      @flag_student_container.show()
+
+      @submit_button.unbind('click')
+      @submit_button.click @submit_grade
+    else if response.error
+      @render_error(response.error)
+    else
+      @render_error("An error occured when retrieving the next submission.")
+
+
+  make_paragraphs: (text) ->
+    paragraph_split = text.split(/\n\s*\n/)
+    new_text = ''
+    for paragraph in paragraph_split
+      new_text += "<p>#{paragraph}</p>"
+    return new_text
+
+  # render common information between calibration and grading
+  render_submission_data: (response) =>
+    @content_panel.show()
+
+    @submission_container.append(@make_paragraphs(response.student_response))
+    @prompt_container.html(response.prompt)
+    @rubric_selection_container.html(response.rubric)
+    @submission_key_input.val(response.submission_key)
+    @essay_id_input.val(response.submission_id)
+    @setup_score_selection(response.max_score)
+
+    @submit_button.hide()
+    @action_button.hide()
+    @calibration_feedback_panel.hide()
+
+
+  render_calibration_feedback: (response) =>
+    # display correct grade
+    @calibration_feedback_panel.slideDown()
+    calibration_wrapper = $('.calibration-feedback-wrapper')
+    calibration_wrapper.html("<p>The score you gave was: #{@grade}. The actual score is: #{response.actual_score}</p>")
+
+
+    score = parseInt(@grade)
+    actual_score = parseInt(response.actual_score)
+
+    if score == actual_score
+      calibration_wrapper.append("<p>Congratulations! Your score matches the actual score!</p>")
+    else
+      calibration_wrapper.append("<p>Please try to understand the grading critera better to be more accurate next time.</p>") 
+
+    # disable score selection and submission from the grading interface
+    $("input[name='score-selection']").attr('disabled', true)
+    @submit_button.hide()
+    
+  render_interstitial_page: () =>
+    @content_panel.hide()
+    @interstitial_page.show()
+
+  render_error: (error_message) =>
+      @error_container.show()
+      @calibration_feedback_panel.hide()
+      @error_container.html(error_message)
+      @content_panel.hide()
+      @action_button.show()
+
+  show_submit_button: () =>
+    @submit_button.show()
+
+  setup_score_selection: (max_score) =>
+    
+    # first, get rid of all the old inputs, if any.
+    @score_selection_container.html("""
+    <h3>Overall Score</h3>
+    <p>Choose an overall score for this submission.</p>
+    """)
+
+    # Now create new labels and inputs for each possible score.
+    for score in [0..max_score]
+      id = 'score-' + score
+      label = """<label for="#{id}">#{score}</label>"""
+      
+      input = """
+              <input type="radio" name="grade-selection" id="#{id}" value="#{score}"/>
+              """       # "  fix broken parsing in emacs
+      @score_selection_container.append(input + label)
+
+    # And now hook up the event handlers again.  The rubric radios carry the
+    # score-selection class (their names are score-selection-N), so bind by
+    # class rather than by name.
+    $("input.score-selection").change @graded_callback
+    $("input[name='grade-selection']").change @graded_callback
+
+
+
+mock_backend = false
+ajax_url = $('.peer-grading').data('ajax_url')
+backend = new PeerGradingProblemBackend(ajax_url, mock_backend)
+$(document).ready(() -> new PeerGradingProblem(backend))
diff --git a/common/lib/xmodule/xmodule/peer_grading_module.py b/common/lib/xmodule/xmodule/peer_grading_module.py
new file mode 100644
index 0000000000000000000000000000000000000000..8002a8d9233870632b0fcef96122c26f439586bd
--- /dev/null
+++ b/common/lib/xmodule/xmodule/peer_grading_module.py
@@ -0,0 +1,439 @@
+"""
+This module provides an interface on the grading-service backend
+for peer grading
+
+Use peer_grading_service() to get the version specified
+in settings.PEER_GRADING_INTERFACE
+
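+A minimal usage sketch (problem_location and grader_id here are hypothetical
+values supplied by the caller):
+
+    service = peer_grading_service()
+    response_json = service.get_next_submission(problem_location, grader_id)
+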
+"""
+import json
+import logging
+
+from django.core.urlresolvers import reverse
+from lxml import etree
+from pkg_resources import resource_string
+
+from courseware.courses import get_course_with_access
+from student.models import unique_id_for_user
+
+from grading_service import GradingServiceError
+
+from .capa_module import ComplexEncoder
+from .editing_module import EditingDescriptor
+from .stringify import stringify_children
+from .x_module import XModule
+from .xml_module import XmlDescriptor
+from progress import Progress
+
+from peer_grading_service import peer_grading_service
+
+log = logging.getLogger(__name__)
+
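+# _reverse_with_slash is used by the rendering methods below but was not
+# defined in this file; a minimal sketch, assuming course-scoped url patterns
+# that take a course_id kwarg:
+def _reverse_with_slash(url_name, course_id):
+    """Reverse a url for this course and make sure it ends with a slash."""
+    url = reverse(url_name, kwargs={'course_id': course_id})
+    if not url.endswith('/'):
+        url += '/'
+    return url
+
+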
+class PeerGradingModule(XModule):
+    _VERSION = 1
+
+    js = {'coffee': [resource_string(__name__, 'js/src/peergrading/peer_grading.coffee'),
+                     resource_string(__name__, 'js/src/peergrading/peer_grading_problem.coffee'),
+                     resource_string(__name__, 'js/src/collapsible.coffee'),
+                     resource_string(__name__, 'js/src/javascript_loader.coffee'),
+                     ]}
+    js_module_name = "PeerGrading"
+
+    css = {'scss': [resource_string(__name__, 'css/combinedopenended/display.scss')]}
+
+    def __init__(self, system, location, definition, descriptor,
+                 instance_state=None, shared_state=None, **kwargs):
+        XModule.__init__(self, system, location, definition, descriptor,
+            instance_state, shared_state, **kwargs)
+
+        # Load instance state
+        if instance_state is not None:
+            instance_state = json.loads(instance_state)
+        else:
+            instance_state = {}
+
+        # We need to set the location here so the child modules can use it
+        system.set('location', location)
+        self.peer_gs = peer_grading_service()
+        log.debug(self.system)
+
+    def _err_response(self, msg):
+        """
+        Return a dict with success=False and the given error message, ready
+        to be serialized by handle_ajax.
+        """
+        return {'success': False, 'error': msg}
+
+    def _check_required(self, get, required):
+        actual = set(get.keys())
+        missing = required - actual
+        if len(missing) > 0:
+            return False, "Missing required keys: {0}".format(', '.join(missing))
+        else:
+            return True, ""
+
+    def get_html(self):
+        """
+         Needs to be implemented by inheritors.  Renders the HTML that students see.
+        @return:
+        """
+        pass
+
+    def handle_ajax(self, dispatch, get):
+        """
+        Needs to be implemented by child modules.  Handles AJAX events.
+        @return:
+        """
+
+        handlers = {
+            'get_next_submission': self.get_next_submission,
+            'show_calibration_essay': self.show_calibration_essay,
+            'save_post_assessment': self.message_post,
+            'is_student_calibrated': self.is_student_calibrated,
+            'save_grade': self.save_grade,
+            'save_calibration_essay' : self.save_calibration_essay,
+            }
+
+        if dispatch not in handlers:
+            return 'Error'
+
+        before = self.get_progress()
+        d = handlers[dispatch](get)
+        after = self.get_progress()
+        d.update({
+            'progress_changed': after != before,
+            'progress_status': Progress.to_js_status_str(after),
+            })
+        return json.dumps(d, cls=ComplexEncoder)
+
+    def get_next_submission(self, get):
+        """
+        Makes a call to the grading controller for the next essay that should be graded
+        Returns a json dict with the following keys:
+
+        'success': bool
+
+        'submission_id': a unique identifier for the submission, to be passed back
+                         with the grade.
+
+        'student_response': the student's response, rendered as read-only html for grading
+
+        'rubric': the rubric, also rendered as html.
+
+        'submission_key': a key associated with the submission for validation reasons
+
+        'error': if success is False, will have an error message with more info.
+        """
+        required = set(['location'])
+        success, message = self._check_required(get, required)
+        if not success:
+            return self._err_response(message)
+        # the runtime supplies an anonymous id for the student doing the grading
+        grader_id = self.system.anonymous_student_id
+        location = get['location']
+
+        try:
+            response = self.peer_gs.get_next_submission(location, grader_id)
+            return json.loads(response)
+        except GradingServiceError:
+            log.exception("Error getting next submission.  server url: {0}, location: {1}, grader_id: {2}"
+            .format(self.peer_gs.url, location, grader_id))
+            return {'success': False,
+                    'error': 'Could not connect to grading service'}
+
+    def save_grade(self, get):
+        """
+        Saves the grade of a given submission.
+        Input:
+            The request should have the following keys:
+            location - problem location
+            submission_id - id associated with this submission
+            submission_key - submission key given for validation purposes
+            score - the grade that was given to the submission
+            feedback - the feedback from the grader
+        Returns
+            A json object with the following keys:
+            success: bool indicating whether the save was a success
+            error: if there was an error in the submission, this is the error message
+        """
+        required = set(['location', 'submission_id', 'submission_key', 'score', 'feedback', 'rubric_scores[]', 'submission_flagged'])
+        success, message = self._check_required(get, required)
+        if not success:
+            return self._err_response(message)
+        grader_id = self.system.anonymous_student_id
+        location = get['location']
+        submission_id = get['submission_id']
+        score = get['score']
+        feedback = get['feedback']
+        submission_key = get['submission_key']
+        rubric_scores = get.getlist('rubric_scores[]')
+        submission_flagged = get['submission_flagged']
+        try:
+            response = self.peer_gs.save_grade(location, grader_id, submission_id,
+                score, feedback, submission_key, rubric_scores, submission_flagged)
+            return json.loads(response)
+        except GradingServiceError:
+            log.exception("""Error saving grade.  server url: {0}, location: {1}, submission_id: {2},
+                            submission_key: {3}, score: {4}"""
+            .format(self.peer_gs.url,
+                location, submission_id, submission_key, score)
+            )
+            return {'success': False,
+                    'error': 'Could not connect to grading service'}
+
+
+
+    def is_student_calibrated(self, get):
+        """
+        Calls the grading controller to see if the given student is calibrated
+        on the given problem
+
+        Input:
+            In the request, we need the following arguments:
+            location - problem location
+
+        Returns:
+            Json object with the following keys
+            success - bool indicating whether or not the call was successful
+            calibrated - true if the grader has fully calibrated and can now move on to grading
+                       - false if the grader is still working on calibration problems
+            total_calibrated_on_so_far - the number of calibration essays for this problem
+                that this grader has graded
+        """
+        required = set(['location'])
+        success, message = self._check_required(get, required)
+        if not success:
+            return self._err_response(message)
+        grader_id = self.system.anonymous_student_id
+        location = get['location']
+
+        try:
+            response = self.peer_gs.is_student_calibrated(location, grader_id)
+            return json.loads(response)
+        except GradingServiceError:
+            log.exception("Error from grading service.  server url: {0}, grader_id: {1}, location: {2}"
+            .format(self.peer_gs.url, grader_id, location))
+            return {'success': False,
+                    'error': 'Could not connect to grading service'}
+
+
+
+    def show_calibration_essay(self, get):
+        """
+        Fetch the next calibration essay from the grading controller and return it
+        Inputs:
+            In the request
+            location - problem location
+
+        Returns:
+            A json dict with the following keys
+            'success': bool
+
+            'submission_id': a unique identifier for the submission, to be passed back
+                             with the grade.
+
+            'student_response': the student's response, rendered as read-only html for grading
+
+            'rubric': the rubric, also rendered as html.
+
+            'submission_key': a key associated with the submission for validation reasons
+
+            'error': if success is False, will have an error message with more info.
+
+        """
+        required = set(['location'])
+        success, message = self._check_required(get, required)
+        if not success:
+            return self._err_response(message)
+
+        grader_id = self.system.anonymous_student_id
+        location = get['location']
+        try:
+            response = self.peer_gs.show_calibration_essay(location, grader_id)
+            return json.loads(response)
+        except GradingServiceError:
+            log.exception("Error from grading service.  server url: {0}, location: {1}"
+            .format(self.peer_gs.url, location))
+            return {'success': False,
+                    'error': 'Could not connect to grading service'}
+        # if we can't parse the rubric into HTML
+        except etree.XMLSyntaxError:
+            log.exception("Cannot parse rubric string.")
+            return {'success': False,
+                    'error': 'Error displaying submission'}
+
+
+    def save_calibration_essay(self, get):
+        """
+        Saves the grader's grade of a given calibration.
+        Input:
+            The request should have the following keys:
+            location - problem location
+            submission_id - id associated with this submission
+            submission_key - submission key given for validation purposes
+            score - the grade that was given to the submission
+            feedback - the feedback from the grader
+        Returns
+            A json object with the following keys:
+            success: bool indicating whether the save was a success
+            error: if there was an error in the submission, this is the error message
+            actual_score: the score that the instructor gave to this calibration essay
+
+        """
+        required = set(['location', 'submission_id', 'submission_key', 'score', 'feedback', 'rubric_scores[]'])
+        success, message = self._check_required(get, required)
+        if not success:
+            return self._err_response(message)
+        grader_id = self.system.anonymous_student_id
+        location = get['location']
+        calibration_essay_id = get['submission_id']
+        submission_key = get['submission_key']
+        score = get['score']
+        feedback = get['feedback']
+        rubric_scores = get.getlist('rubric_scores[]')
+
+        try:
+            response = self.peer_gs.save_calibration_essay(location, grader_id, calibration_essay_id,
+                submission_key, score, feedback, rubric_scores)
+            return json.loads(response)
+        except GradingServiceError:
+            log.exception("Error saving calibration grade, location: {0}, submission_id: {1}, submission_key: {2}, grader_id: {3}".format(location, calibration_essay_id, submission_key, grader_id))
+            return self._err_response('Could not connect to grading service')
+
+    def peer_grading(self, request, course_id):
+        '''
+        Show a peer grading interface
+        '''
+        course = get_course_with_access(request.user, course_id, 'load')
+
+        # call problem list service
+        success = False
+        error_text = ""
+        problem_list = []
+        try:
+            problem_list_json = self.peer_gs.get_problem_list(course_id, unique_id_for_user(request.user))
+            problem_list_dict = json.loads(problem_list_json)
+            success = problem_list_dict['success']
+            if 'error' in problem_list_dict:
+                error_text = problem_list_dict['error']
+
+            problem_list = problem_list_dict['problem_list']
+
+        except GradingServiceError:
+            error_text = "Error occurred while contacting the grading service"
+            success = False
+        # catch the error if the json load fails
+        except ValueError:
+            error_text = "Could not get problem list"
+            success = False
+
+        ajax_url = _reverse_with_slash('peer_grading', course_id)
+
+        return self.system.render_template('peer_grading/peer_grading.html', {
+            'course': course,
+            'course_id': course_id,
+            'ajax_url': ajax_url,
+            'success': success,
+            'problem_list': problem_list,
+            'error_text': error_text,
+            # Checked above
+            'staff_access': False, })
+
+
+    def peer_grading_problem(self, request, course_id):
+        '''
+        Show individual problem interface
+        '''
+        course = get_course_with_access(request.user, course_id, 'load')
+        problem_location = request.GET.get("location")
+
+        ajax_url = _reverse_with_slash('peer_grading', course_id)
+
+        return self.system.render_template('peer_grading/peer_grading_problem.html', {
+            'view_html': '',
+            'course': course,
+            'problem_location': problem_location,
+            'course_id': course_id,
+            'ajax_url': ajax_url,
+            # Checked above
+            'staff_access': False, })
+
+class PeerGradingDescriptor(XmlDescriptor, EditingDescriptor):
+    """
+    Module for adding peer grading questions
+    """
+    mako_template = "widgets/html-edit.html"
+    module_class = PeerGradingModule
+    filename_extension = "xml"
+
+    stores_state = True
+    has_score = True
+    template_dir_name = "peer_grading"
+
+    js = {'coffee': [resource_string(__name__, 'js/src/html/edit.coffee')]}
+    js_module_name = "HTMLEditingDescriptor"
+
+    @classmethod
+    def definition_from_xml(cls, xml_object, system):
+        """
+        Pull out the individual tasks, the rubric, and the prompt, and parse.
+
+        There are currently no expected children, so this performs no
+        validation and returns an empty definition dict.
+        """
+        expected_children = []
+        for child in expected_children:
+            if len(xml_object.xpath(child)) == 0:
+                raise ValueError("Peer grading definition must include at least one '{0}' tag".format(child))
+
+        def parse_task(k):
+            """Assumes that xml_object has child k"""
+            return [stringify_children(xml_object.xpath(k)[i]) for i in xrange(0, len(xml_object.xpath(k)))]
+
+        def parse(k):
+            """Assumes that xml_object has child k"""
+            return xml_object.xpath(k)[0]
+
+        return {}
+
+
+    def definition_to_xml(self, resource_fs):
+        '''Return an xml element representing this definition.'''
+        elt = etree.Element('peergrading')
+
+        def add_child(k):
+            child_str = '<{tag}>{body}</{tag}>'.format(tag=k, body=self.definition[k])
+            child_node = etree.fromstring(child_str)
+            elt.append(child_node)
+
+        for child in ['task']:
+            add_child(child)
+
+        return elt
\ No newline at end of file
diff --git a/common/lib/xmodule/xmodule/peer_grading_service.py b/common/lib/xmodule/xmodule/peer_grading_service.py
new file mode 100644
index 0000000000000000000000000000000000000000..e2a5d72b6cd2f09db12bbca5ce7bc230b2def116
--- /dev/null
+++ b/common/lib/xmodule/xmodule/peer_grading_service.py
@@ -0,0 +1,256 @@
+import json
+import logging
+import sys
+
+import requests
+from requests.exceptions import RequestException, ConnectionError, HTTPError
+
+from django.conf import settings
+from lxml import etree
+
+from combined_open_ended_rubric import CombinedOpenEndedRubric, RubricParsingError
+from grading_service import GradingServiceError
+
+log = logging.getLogger(__name__)
+
+class PeerGradingService(object):
+    """
+    Interface with the grading controller for peer grading
+    """
+    def __init__(self, config):
+        self.username = config['username']
+        self.password = config['password']
+        self.url = config['url']
+        # ModuleSystem used by _render_rubric to render the rubric template;
+        # assumed to be supplied in the service config
+        self.system = config.get('system')
+        self.login_url = self.url + '/login/'
+        self.session = requests.session()
+        self.get_next_submission_url = self.url + '/get_next_submission/'
+        self.save_grade_url = self.url + '/save_grade/'
+        self.is_student_calibrated_url = self.url + '/is_student_calibrated/'
+        self.show_calibration_essay_url = self.url + '/show_calibration_essay/'
+        self.save_calibration_essay_url = self.url + '/save_calibration_essay/'
+        self.get_problem_list_url = self.url + '/get_problem_list/'
+        self.get_notifications_url = self.url + '/get_notifications/'
+
+    def get_next_submission(self, problem_location, grader_id):
+        response = self.get(self.get_next_submission_url,
+            {'location': problem_location, 'grader_id': grader_id})
+        return json.dumps(self._render_rubric(response))
+
+    def save_grade(self, location, grader_id, submission_id, score, feedback, submission_key, rubric_scores, submission_flagged):
+        data = {'grader_id' : grader_id,
+                'submission_id' : submission_id,
+                'score' : score,
+                'feedback' : feedback,
+                'submission_key': submission_key,
+                'location': location,
+                'rubric_scores': rubric_scores,
+                'rubric_scores_complete': True,
+                'submission_flagged' : submission_flagged}
+        return self.post(self.save_grade_url, data)
+
+    def is_student_calibrated(self, problem_location, grader_id):
+        params = {'problem_id' : problem_location, 'student_id': grader_id}
+        return self.get(self.is_student_calibrated_url, params)
+
+    def show_calibration_essay(self, problem_location, grader_id):
+        params = {'problem_id' : problem_location, 'student_id': grader_id}
+        response = self.get(self.show_calibration_essay_url, params)
+        return json.dumps(self._render_rubric(response))
+
+    def save_calibration_essay(self, problem_location, grader_id, calibration_essay_id, submission_key,
+                               score, feedback, rubric_scores):
+        data = {'location': problem_location,
+                'student_id': grader_id,
+                'calibration_essay_id': calibration_essay_id,
+                'submission_key': submission_key,
+                'score': score,
+                'feedback': feedback,
+                'rubric_scores[]': rubric_scores,
+                'rubric_scores_complete': True}
+        return self.post(self.save_calibration_essay_url, data)
+
+    def get_problem_list(self, course_id, grader_id):
+        params = {'course_id': course_id, 'student_id': grader_id}
+        response = self.get(self.get_problem_list_url, params)
+        return response
+
+    def get_notifications(self, course_id, grader_id):
+        params = {'course_id': course_id, 'student_id': grader_id}
+        response = self.get(self.get_notifications_url, params)
+        return response
+
+    def _login(self):
+        """
+        Log into the staff grading service.
+
+        Raises requests.exceptions.HTTPError if something goes wrong.
+
+        Returns the decoded json dict of the response.
+        """
+        response = self.session.post(self.login_url,
+            {'username': self.username,
+             'password': self.password,})
+
+        response.raise_for_status()
+
+        return response.json
+
+    def post(self, url, data, allow_redirects=False):
+        """
+        Make a post request to the grading controller
+        """
+        try:
+            op = lambda: self.session.post(url, data=data,
+                allow_redirects=allow_redirects)
+            r = self._try_with_login(op)
+        except (RequestException, ConnectionError, HTTPError) as err:
+            # reraise as promised GradingServiceError, but preserve stacktrace.
+            raise GradingServiceError, str(err), sys.exc_info()[2]
+
+        return r.text
+
+    def get(self, url, params, allow_redirects=False):
+        """
+        Make a get request to the grading controller
+        """
+        log.debug(params)
+        op = lambda: self.session.get(url,
+            allow_redirects=allow_redirects,
+            params=params)
+        try:
+            r = self._try_with_login(op)
+        except (RequestException, ConnectionError, HTTPError) as err:
+            # reraise as promised GradingServiceError, but preserve stacktrace.
+            raise GradingServiceError, str(err), sys.exc_info()[2]
+
+        return r.text
+
+
+    def _try_with_login(self, operation):
+        """
+        Call operation(), which should return a requests response object.  If
+        the request fails with a 'login_required' error, call _login() and try
+        the operation again.
+
+        Returns the result of operation().  Does not catch exceptions.
+        """
+        response = operation()
+        if (response.json
+            and response.json.get('success') == False
+            and response.json.get('error') == 'login_required'):
+            # apparently we aren't logged in.  Try to fix that.
+            r = self._login()
+            if r and not r.get('success'):
+                log.warning("Couldn't log into peer grading backend. Response: %s",
+                    r)
+                # try again
+            response = operation()
+            response.raise_for_status()
+
+        return response
+
+    def _render_rubric(self, response, view_only=False):
+        """
+        Given an HTTP Response with the key 'rubric', render out the html
+        required to display the rubric and put it back into the response
+
+        returns the updated response as a dictionary that can be serialized later
+
+        """
+        try:
+            response_json = json.loads(response)
+            if 'rubric' in response_json:
+                rubric = response_json['rubric']
+                rubric_renderer = CombinedOpenEndedRubric(self.system, False)
+                success, rubric_html = rubric_renderer.render_rubric(rubric)
+                response_json['rubric'] = rubric_html
+            return response_json
+        # if we can't parse the rubric into HTML, 
+        except (etree.XMLSyntaxError, RubricParsingError):
+            log.exception("Cannot parse rubric string. Raw string: {0}"
+            .format(rubric))
+            return {'success': False,
+                    'error': 'Error displaying submission'}
+        except ValueError:
+            log.exception("Error parsing response: {0}".format(response))
+            return {'success': False,
+                    'error': "Error displaying submission"}
+
+"""
+This is a mock peer grading service that can be used for unit tests
+without making actual service calls to the grading controller
+"""
+class MockPeerGradingService(object):
+    def get_next_submission(self, problem_location, grader_id):
+        return json.dumps({'success': True,
+                           'submission_id':1,
+                           'submission_key': "",
+                           'student_response': 'fake student response',
+                           'prompt': 'fake submission prompt',
+                           'rubric': 'fake rubric',
+                           'max_score': 4})
+
+    def save_grade(self, location, grader_id, submission_id, score, feedback,
+                   submission_key, rubric_scores, submission_flagged):
+        return json.dumps({'success': True})
+
+    def is_student_calibrated(self, problem_location, grader_id):
+        return json.dumps({'success': True, 'calibrated': True})
+
+    def show_calibration_essay(self, problem_location, grader_id):
+        return json.dumps({'success': True,
+                           'submission_id':1,
+                           'submission_key': '',
+                           'student_response': 'fake student response',
+                           'prompt': 'fake submission prompt',
+                           'rubric': 'fake rubric',
+                           'max_score': 4})
+
+    def save_calibration_essay(self, problem_location, grader_id, calibration_essay_id,
+                               submission_key, score, feedback, rubric_scores):
+        return json.dumps({'success': True, 'actual_score': 2})
+
+    def get_problem_list(self, course_id, grader_id):
+        return json.dumps({'success': True,
+                           'problem_list': [
+                               {'location': 'i4x://MITx/3.091x/problem/open_ended_demo1',
+                                'problem_name': "Problem 1", 'num_graded': 3, 'num_pending': 5},
+                               {'location': 'i4x://MITx/3.091x/problem/open_ended_demo2',
+                                'problem_name': "Problem 2", 'num_graded': 1, 'num_pending': 5}
+                           ]})
+
+_service = None
+def peer_grading_service():
+    """
+    Return a peer grading service instance--if settings.MOCK_PEER_GRADING is True,
+    returns a mock one, otherwise a real one.
+
+    Caches the result, so changing the setting after the first call to this
+    function will have no effect.
+    """
+    global _service
+    if _service is not None:
+        return _service
+
+    if settings.MOCK_PEER_GRADING:
+        _service = MockPeerGradingService()
+    else:
+        _service = PeerGradingService(settings.PEER_GRADING_INTERFACE)
+
+    return _service
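+
+# A sketch of exercising the mock in a unit test (assumes the test settings
+# set MOCK_PEER_GRADING = True, so peer_grading_service() returns the mock):
+#
+#   service = peer_grading_service()
+#   response = json.loads(service.is_student_calibrated('fake/location', 'grader-1'))
+#   assert response['success'] and response['calibrated']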