本文整理汇总了Python中submissions.api.get_submission函数的典型用法代码示例。如果您正苦于以下问题:Python get_submission函数的具体用法?Python get_submission怎么用?Python get_submission使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了get_submission函数的19个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: test_get_old_submission
def test_get_old_submission(self):
    """
    Verify that an old-style submission row (inserted outside the ORM) can
    still be fetched via ``api.get_submission``, and that the ``raw()``
    fallback query is only used on the first, uncached access.
    """
    # Hack in an old-style submission; this can't be created with the ORM
    # (EDUCATOR-1090), so the row is inserted with raw SQL instead.
    with transaction.atomic():
        student_item = StudentItem.objects.create()
        # Use a parameterized query (%s placeholders + params list) instead
        # of str.format with hand-escaped quotes, so the DB driver handles
        # quoting of every value. The original passed an empty params list
        # while interpolating values into the SQL string itself.
        connection.cursor().execute(
            """
            INSERT INTO submissions_submission
            (id, uuid, attempt_number, submitted_at, created_at, raw_answer, student_item_id, status)
            VALUES (%s, %s, %s, %s, %s, %s, %s, %s);
            """,
            [
                1,
                'deadbeef-1234-5678-9100-1234deadbeef',
                1,
                '2017-07-13 17:56:02.656129',
                '2017-07-13 17:56:02.656129',
                '{"parts":[{"text":"raw answer text"}]}',
                int(student_item.id),
                'A',
            ],
        )
    with mock.patch.object(
        Submission.objects, 'raw',
        wraps=Submission.objects.raw
    ) as mock_raw:
        # First access must fall back to the raw() query exactly once.
        _ = api.get_submission('deadbeef-1234-5678-9100-1234deadbeef')
        self.assertEqual(1, mock_raw.call_count)
        # On subsequent accesses we still get the submission, but raw() isn't needed
        mock_raw.reset_mock()
        _ = api.get_submission('deadbeef-1234-5678-9100-1234deadbeef')
        self.assertEqual(0, mock_raw.call_count)
开发者ID:edx,项目名称:edx-submissions,代码行数:32,代码来源:test_api.py
示例2: test_load_non_json_answer
def test_load_non_json_answer(self):
    """Corrupt a stored raw answer and check that both getters raise an internal error."""
    # This shouldn't be possible through the public API, but guard against
    # it anyway: blank out the raw answer so it is no longer valid JSON.
    created = api.create_submission(STUDENT_ITEM, ANSWER_ONE)
    stored = Submission.objects.get(uuid=created['uuid'])
    stored.raw_answer = ''
    stored.save()
    # Both retrieval entry points must surface the corruption the same way.
    for getter in (api.get_submission, api.get_submission_and_student):
        with self.assertRaises(api.SubmissionInternalError):
            getter(stored.uuid)
开发者ID:YoshidaKS,项目名称:edx-ora2,代码行数:13,代码来源:test_api.py
示例3: test_caching
def test_caching(self):
    """A submission is read from the database once, then served from the cache."""
    created = api.create_submission(STUDENT_ITEM, "Hello World!")
    submission_uuid = created["uuid"]
    # First read misses the cache and issues exactly one database query.
    with self.assertNumQueries(1):
        first_read = api.get_submission(submission_uuid)
    # Second read is served entirely from the cache.
    with self.assertNumQueries(0):
        second_read = api.get_submission(submission_uuid)
    # Both reads round-trip the original data unchanged.
    self.assertEqual(created, first_read)
    self.assertEqual(created, second_read)
开发者ID:YoshidaKS,项目名称:edx-ora2,代码行数:14,代码来源:test_api.py
示例4: test_load_non_json_answer
def test_load_non_json_answer(self):
    """Verify the API raises SubmissionInternalError when the stored raw answer is not JSON."""
    submission = api.create_submission(STUDENT_ITEM, ANSWER_ONE)
    sub_model = Submission.objects.get(uuid=submission['uuid'])
    # This should never happen, if folks are using the public API.
    # Create a submission with a raw answer that is NOT valid JSON
    # (raw SQL is used because the model layer would serialize the value).
    query = "UPDATE submissions_submission SET raw_answer = '}' WHERE id = %s"
    connection.cursor().execute(query, [str(sub_model.id)])
    # NOTE(review): commit_unless_managed was removed in Django 1.6 --
    # presumably this test targets an older Django; confirm before upgrading.
    transaction.commit_unless_managed()
    # Both retrieval entry points must report the corrupted payload.
    with self.assertRaises(api.SubmissionInternalError):
        api.get_submission(sub_model.uuid)
    with self.assertRaises(api.SubmissionInternalError):
        api.get_submission_and_student(sub_model.uuid)
开发者ID:roopakgk,项目名称:edx-submissions,代码行数:15,代码来源:test_api.py
示例5: self_path_and_context
def self_path_and_context(self):
    """
    Determine the template path and context to use when rendering the self-assessment step.

    Returns:
        tuple of `(path, context)`, where `path` (str) is the path to the template,
        and `context` (dict) is the template context.

    Raises:
        SubmissionError: Error occurred while retrieving the current submission.
        SelfAssessmentRequestError: Error occurred while checking if we had a self-assessment.
    """
    context = {}
    # Default template: step unavailable (overridden below as state dictates).
    path = 'openassessmentblock/self/oa_self_unavailable.html'
    problem_closed, reason, start_date, due_date = self.is_closed(step="self-assessment")
    # We display the due date whether the problem is open or closed.
    # If no date is set, it defaults to the distant future, in which
    # case we don't display the date.
    if due_date < DISTANT_FUTURE:
        context['self_due'] = due_date
    # If we haven't submitted yet, `workflow` will be an empty dict,
    # and `workflow_status` will be None.
    workflow = self.get_workflow_info()
    workflow_status = workflow.get('status')
    self_complete = workflow.get('status_details', {}).get('self', {}).get('complete', False)
    if self_complete:
        # The self step was already finished -- show the completion template.
        path = 'openassessmentblock/self/oa_self_complete.html'
    elif workflow_status == 'self' or problem_closed:
        assessment = self_api.get_assessment(workflow.get("submission_uuid"))
        if assessment is not None:
            # An assessment exists even though the step isn't marked complete.
            path = 'openassessmentblock/self/oa_self_complete.html'
        elif problem_closed:
            if reason == 'start':
                context["self_start"] = start_date
                path = 'openassessmentblock/self/oa_self_unavailable.html'
            elif reason == 'due':
                path = 'openassessmentblock/self/oa_self_closed.html'
        else:
            # Step is open and no assessment yet: render the assessment form.
            submission = submission_api.get_submission(self.submission_uuid)
            context["rubric_criteria"] = self.rubric_criteria_with_labels
            context["estimated_time"] = "20 minutes"  # TODO: Need to configure this.
            context["self_submission"] = submission
            # Determine if file upload is supported for this XBlock.
            context["allow_file_upload"] = self.allow_file_upload
            context['self_file_url'] = self.get_download_url_from_submission(submission)
            path = 'openassessmentblock/self/oa_self_assessment.html'
    else:
        # No submission yet or in peer assessment
        path = 'openassessmentblock/self/oa_self_unavailable.html'
    return path, context
开发者ID:UQ-UQx,项目名称:edx-ora2,代码行数:57,代码来源:self_assessment_mixin.py
示例6: test_get_submission
def test_get_submission(self):
    """Round-trip a submission through create/get and check the error paths."""
    # Base case: a created submission comes back unchanged.
    original = api.create_submission(STUDENT_ITEM, ANSWER_ONE)
    retrieved = api.get_submission(original["uuid"])
    self.assertEqual(original, retrieved)
    # Inputs that are not uuid strings are rejected as bad requests.
    for bad_input in (20, {}):
        with self.assertRaises(api.SubmissionRequestError):
            api.get_submission(bad_input)
    # Unknown identifiers are reported as not found.
    with self.assertRaises(api.SubmissionNotFoundError):
        api.get_submission("notarealuuid")
    with self.assertRaises(api.SubmissionNotFoundError):
        api.get_submission("0" * 50)  # This is bigger than our field size
开发者ID:YoshidaKS,项目名称:edx-ora2,代码行数:17,代码来源:test_api.py
示例7: get_submission_to_assess
def get_submission_to_assess(course_id, item_id, scorer_id):
    """
    Get a submission for staff evaluation.

    Retrieves a submission for assessment for the given staff member.

    Args:
        course_id (str): The course that we would like to fetch submissions from.
        item_id (str): The student_item (problem) that we would like to retrieve submissions for.
        scorer_id (str): The user id of the staff member scoring this submission

    Returns:
        dict: A student submission for assessment. This contains a 'student_item',
            'attempt_number', 'submitted_at', 'created_at', and 'answer' field to be
            used for assessment. Returns None when no submission is awaiting review.

    Raises:
        StaffAssessmentInternalError: Raised when there is an internal error
            retrieving staff workflow information.

    Examples:
        >>> get_submission_to_assess("a_course_id", "an_item_id", "a_scorer_id")
        {
            'student_item': 2,
            'attempt_number': 1,
            'submitted_at': datetime.datetime(2014, 1, 29, 23, 14, 52, 649284, tzinfo=<UTC>),
            'created_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 668850, tzinfo=<UTC>),
            'answer': { ... }
        }
    """
    submission_uuid = StaffWorkflow.get_submission_for_review(course_id, item_id, scorer_id)
    # Guard clause: nothing to review for this (course, item) pair.
    if not submission_uuid:
        logger.info(
            u"No submission found for staff to assess ({}, {})"
            .format(
                course_id,
                item_id,
            )
        )
        return None
    try:
        return submissions_api.get_submission(submission_uuid)
    except submissions_api.SubmissionNotFoundError:
        # The workflow pointed at a uuid that no longer resolves; surface
        # it as an internal error so callers see a staff-assessment failure.
        message = (
            u"Could not find a submission with the uuid {}"
        ).format(submission_uuid)
        logger.exception(message)
        raise StaffAssessmentInternalError(message)
开发者ID:openfun,项目名称:edx-ora2,代码行数:51,代码来源:staff.py
示例8: test_get_submission
def test_get_submission(self):
    """Creating then fetching a submission returns identical data; bad inputs raise."""
    created = api.create_submission(STUDENT_ITEM, ANSWER_ONE)
    fetched = api.get_submission(created["uuid"])
    self.assertEqual(created, fetched)
    # Inputs that are not uuid strings are rejected up front.
    for invalid in (20, {}):
        with self.assertRaises(api.SubmissionRequestError):
            api.get_submission(invalid)
    # A well-formed but unknown uuid raises "not found".
    with self.assertRaises(api.SubmissionNotFoundError):
        api.get_submission("deadbeef-1234-5678-9100-1234deadbeef")
开发者ID:edx,项目名称:edx-submissions,代码行数:15,代码来源:test_api.py
示例9: render_grade_complete
def render_grade_complete(self, workflow):
    """
    Render the grade complete state.

    Args:
        workflow (dict): The serialized Workflow model.

    Returns:
        tuple of context (dict), template_path (string)
    """
    feedback = peer_api.get_assessment_feedback(self.submission_uuid)
    feedback_text = feedback.get('feedback', '') if feedback else ''
    student_submission = sub_api.get_submission(workflow['submission_uuid'])
    # Assessments are keyed by the submission uuid recorded on the workflow.
    peer_assessments = peer_api.get_assessments(student_submission['uuid'])
    self_assessment = self_api.get_assessment(student_submission['uuid'])
    has_submitted_feedback = peer_api.get_assessment_feedback(workflow['submission_uuid']) is not None
    # We retrieve the score from the workflow, which in turn retrieves
    # the score for our current submission UUID.
    # We look up the score by submission UUID instead of student item
    # to ensure that the score always matches the rubric.
    score = workflow['score']
    context = {
        'score': score,
        'feedback_text': feedback_text,
        'student_submission': student_submission,
        'peer_assessments': peer_assessments,
        'self_assessment': self_assessment,
        'rubric_criteria': self._rubric_criteria_with_feedback(peer_assessments),
        'has_submitted_feedback': has_submitted_feedback,
    }
    # Update the scores we will display to the user
    # Note that we are updating a *copy* of the rubric criteria stored in the XBlock field
    max_scores = peer_api.get_rubric_max_scores(self.submission_uuid)
    median_scores = peer_api.get_assessment_median_scores(student_submission["uuid"])
    if median_scores is not None and max_scores is not None:
        for criterion in context["rubric_criteria"]:
            criterion["median_score"] = median_scores[criterion["name"]]
            criterion["total_value"] = max_scores[criterion["name"]]
    return ('openassessmentblock/grade/oa_grade_complete.html', context)
开发者ID:mulby,项目名称:edx-ora2,代码行数:43,代码来源:grade_mixin.py
示例10: render_self_assessment
def render_self_assessment(self, data, suffix=''):
    """
    Render the self-assessment step for the current workflow state.

    Args:
        data: Handler request data (unused).
        suffix (str): Handler suffix (unused).

    Returns:
        The rendered assessment fragment produced by `render_assessment`,
        or an error fragment when the submission/assessment can't be loaded.
    """
    context = {}
    assessment_module = self.get_assessment_module('self-assessment')
    path = 'openassessmentblock/self/oa_self_unavailable.html'
    problem_closed, reason, date = self.is_closed(step="self-assessment")
    if problem_closed:
        # BUG FIX: compare the closure *reason* ('start'/'due'), not the
        # date object -- `date == 'start'` could never be true, so the
        # start/due dates were never shown. `date` holds the datetime to
        # display and is still what gets formatted.
        if reason == 'start':
            context["self_start"] = self.format_datetime_string(date)
        elif reason == 'due':
            context["self_due"] = self.format_datetime_string(date)
    workflow = self.get_workflow_info()
    # No workflow means nothing has been submitted yet.
    if not workflow:
        return self.render_assessment(path, context)
    try:
        submission = submission_api.get_submission(self.submission_uuid)
        assessment = self_api.get_assessment(
            workflow["submission_uuid"]
        )
    except (submission_api.SubmissionError, self_api.SelfAssessmentRequestError):
        logger.exception(
            u"Could not retrieve self assessment for submission {}"
            .format(workflow["submission_uuid"])
        )
        return self.render_error(_(u"An unexpected error occurred."))
    if workflow["status"] == "self":
        path = 'openassessmentblock/self/oa_self_assessment.html'
        context = {
            "rubric_criteria": self.rubric_criteria,
            "estimated_time": "20 minutes",  # TODO: Need to configure this.
            "self_submission": submission,
        }
    elif assessment is not None:
        path = 'openassessmentblock/self/oa_self_complete.html'
    elif reason == "due" and problem_closed:
        # Same fix as above: the 'due' marker lives in `reason`.
        path = 'openassessmentblock/self/oa_self_closed.html'
    return self.render_assessment(path, context)
开发者ID:jbau,项目名称:edx-ora2,代码行数:42,代码来源:self_assessment_mixin.py
示例11: get_user_submission
def get_user_submission(submission_uuid):
    """Return the most recent submission by user in workflow.

    Return the most recent submission. If no submission is available,
    return None. All submissions are preserved, but only the most recent
    will be returned in this function, since the active workflow will only
    be concerned with the most recent submission.

    Args:
        submission_uuid (str): The uuid for the submission to retrieve.

    Returns:
        (dict): A dictionary representation of a submission to render to
        the front end, or None when the uuid doesn't resolve.
    """
    try:
        submission = api.get_submission(submission_uuid)
    except api.SubmissionRequestError:
        # An unresolvable uuid is an expected condition here, not a failure.
        return None
    return submission
开发者ID:EDUlib,项目名称:edx-ora2,代码行数:21,代码来源:submission_mixin.py
示例12: render_grade_complete
def render_grade_complete(self, workflow):
    """
    Render the grade complete state.

    Args:
        workflow (dict): The serialized Workflow model.

    Returns:
        tuple of context (dict), template_path (string)
    """
    feedback = peer_api.get_assessment_feedback(self.submission_uuid)
    feedback_text = feedback.get('feedback', '') if feedback else ''
    student_submission = sub_api.get_submission(workflow['submission_uuid'])
    peer_assessments = peer_api.get_assessments(student_submission['uuid'])
    self_assessment = self_api.get_assessment(student_submission['uuid'])
    has_submitted_feedback = peer_api.get_assessment_feedback(workflow['submission_uuid']) is not None
    context = {
        'score': workflow['score'],
        'feedback_text': feedback_text,
        'student_submission': student_submission,
        'peer_assessments': peer_assessments,
        'self_assessment': self_assessment,
        # deepcopy so the per-render score annotations added below don't
        # mutate the rubric criteria stored in the XBlock field.
        'rubric_criteria': copy.deepcopy(self.rubric_criteria),
        'has_submitted_feedback': has_submitted_feedback,
    }
    # Update the scores we will display to the user
    # Note that we are updating a *copy* of the rubric criteria stored in the XBlock field
    max_scores = peer_api.get_rubric_max_scores(self.submission_uuid)
    median_scores = peer_api.get_assessment_median_scores(student_submission["uuid"])
    if median_scores is not None and max_scores is not None:
        for criterion in context["rubric_criteria"]:
            criterion["median_score"] = median_scores[criterion["name"]]
            criterion["total_value"] = max_scores[criterion["name"]]
    return ('openassessmentblock/grade/oa_grade_complete.html', context)
开发者ID:jbau,项目名称:edx-ora2,代码行数:37,代码来源:grade_mixin.py
示例13: render_grade_complete
def render_grade_complete(self, workflow):
    """
    Render the grade complete state.

    Args:
        workflow (dict): The serialized Workflow model.

    Returns:
        tuple of context (dict), template_path (string)
    """
    # Import is placed here to avoid model import at project startup.
    from openassessment.assessment.api import peer as peer_api
    from openassessment.assessment.api import self as self_api
    from openassessment.assessment.api import staff as staff_api
    from submissions import api as sub_api
    # Peer specific stuff...
    assessment_steps = self.assessment_steps
    submission_uuid = workflow['submission_uuid']
    # Defaults for steps that aren't configured on this problem.
    staff_assessment = None
    self_assessment = None
    feedback = None
    peer_assessments = []
    has_submitted_feedback = False
    if "peer-assessment" in assessment_steps:
        peer_api.get_score(submission_uuid, self.workflow_requirements()["peer"])
        feedback = peer_api.get_assessment_feedback(submission_uuid)
        peer_assessments = [
            self._assessment_grade_context(peer_assessment)
            for peer_assessment in peer_api.get_assessments(submission_uuid)
        ]
        has_submitted_feedback = feedback is not None
    if "self-assessment" in assessment_steps:
        self_assessment = self._assessment_grade_context(
            self_api.get_assessment(submission_uuid)
        )
    # Staff assessments can exist regardless of the configured steps.
    raw_staff_assessment = staff_api.get_latest_staff_assessment(submission_uuid)
    if raw_staff_assessment:
        staff_assessment = self._assessment_grade_context(raw_staff_assessment)
    feedback_text = feedback.get('feedback', '') if feedback else ''
    student_submission = sub_api.get_submission(submission_uuid)
    # We retrieve the score from the workflow, which in turn retrieves
    # the score for our current submission UUID.
    # We look up the score by submission UUID instead of student item
    # to ensure that the score always matches the rubric.
    # It's possible for the score to be `None` even if the workflow status is "done"
    # when all the criteria in the rubric are feedback-only (no options).
    score = workflow['score']
    context = {
        'score': score,
        'feedback_text': feedback_text,
        'has_submitted_feedback': has_submitted_feedback,
        'student_submission': create_submission_dict(student_submission, self.prompts),
        'peer_assessments': peer_assessments,
        'grade_details': self.grade_details(
            submission_uuid,
            peer_assessments=peer_assessments,
            self_assessment=self_assessment,
            staff_assessment=staff_assessment,
        ),
        'file_upload_type': self.file_upload_type,
        'allow_latex': self.allow_latex,
        'file_urls': self.get_download_urls_from_submission(student_submission),
        'xblock_id': self.get_xblock_id()
    }
    return ('openassessmentblock/grade/oa_grade_complete.html', context)
开发者ID:openfun,项目名称:edx-ora2,代码行数:74,代码来源:grade_mixin.py
示例14: get_submission_to_assess
def get_submission_to_assess(submission_uuid, graded_by):
    """Get a submission to peer evaluate.

    Retrieves a submission for assessment for the given student. This will
    not return a submission submitted by the requesting scorer. Submissions are
    returned based on how many assessments are still required, and if there are
    peers actively assessing a particular submission. If there are no
    submissions requiring assessment, a submission may be returned that will be
    'over graded', and the assessment will not be counted towards the overall
    grade.

    Args:
        submission_uuid (str): The submission UUID from the student
            requesting a submission for assessment. This is used to explicitly
            avoid giving the student their own submission, and determines the
            associated Peer Workflow.
        graded_by (int): The number of assessments a submission
            requires before it has completed the peer assessment process.

    Returns:
        dict: A peer submission for assessment. This contains a 'student_item',
            'attempt_number', 'submitted_at', 'created_at', and 'answer' field to be
            used for assessment.

    Raises:
        PeerAssessmentRequestError: Raised when the request parameters are
            invalid for the request.
        PeerAssessmentInternalError: Raised when there is an internal error
            retrieving peer workflow information.
        PeerAssessmentWorkflowError: Raised when an error occurs because this
            function, or the student item, is not in the proper workflow state
            to retrieve a peer submission.

    Examples:
        >>> get_submission_to_assess("abc123", 3)
        {
            'student_item': 2,
            'attempt_number': 1,
            'submitted_at': datetime.datetime(2014, 1, 29, 23, 14, 52, 649284, tzinfo=<UTC>),
            'created_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 668850, tzinfo=<UTC>),
            'answer': u'The answer is 42.'
        }
    """
    workflow = PeerWorkflow.get_by_submission_uuid(submission_uuid)
    if not workflow:
        raise PeerAssessmentWorkflowError(
            u"A Peer Assessment Workflow does not exist for the student "
            u"with submission UUID {}".format(submission_uuid)
        )
    peer_submission_uuid = workflow.find_active_assessments()
    # If there is an active assessment for this user, get that submission,
    # otherwise, get the first assessment for review, otherwise,
    # get the first submission available for over grading ("over-grading").
    if peer_submission_uuid is None:
        peer_submission_uuid = workflow.get_submission_for_review(graded_by)
    if peer_submission_uuid is None:
        peer_submission_uuid = workflow.get_submission_for_over_grading()
    if peer_submission_uuid:
        try:
            submission_data = sub_api.get_submission(peer_submission_uuid)
            # Record that this scorer has started assessing the submission.
            PeerWorkflow.create_item(workflow, peer_submission_uuid)
            _log_workflow(peer_submission_uuid, workflow)
            return submission_data
        except sub_api.SubmissionNotFoundError:
            # Workflow pointed at a uuid that no longer resolves.
            error_message = (
                u"Could not find a submission with the uuid {} for student {} "
                u"in the peer workflow."
            ).format(peer_submission_uuid, workflow.student_id)
            logger.exception(error_message)
            raise PeerAssessmentWorkflowError(error_message)
    else:
        logger.info(
            u"No submission found for {} to assess ({}, {})"
            .format(
                workflow.student_id,
                workflow.course_id,
                workflow.item_id,
            )
        )
        return None
开发者ID:caesar2164,项目名称:edx-ora2,代码行数:81,代码来源:peer.py
示例15: test_get_submission_deep_error
def test_get_submission_deep_error(self, mock_get):
    """Database-level failures are wrapped in SubmissionInternalError."""
    # Test deep explosions are wrapped
    mock_get.side_effect = DatabaseError("Kaboom!")
    # BUG FIX: the call must be wrapped in assertRaises -- as written the
    # test simply errored out (or relied on a lost expected-exception
    # decorator) instead of asserting that the DatabaseError is translated
    # into the API's internal-error type.
    with self.assertRaises(api.SubmissionInternalError):
        api.get_submission("000000000000000")
开发者ID:YoshidaKS,项目名称:edx-ora2,代码行数:4,代码来源:test_api.py
示例16: get_submission_to_assess
def get_submission_to_assess(
        student_item_dict,
        graded_by,
        over_grading=False):
    """Get a submission to peer evaluate.

    Retrieves a submission for assessment for the given student_item. This will
    not return a submission submitted by the requesting scorer. Submissions are
    returned based on how many assessments are still required, and if there are
    peers actively assessing a particular submission. If there are no
    submissions requiring assessment, a submission may be returned that will be
    'over graded', and the assessment will not be counted towards the overall
    grade.

    Args:
        student_item_dict (dict): The student item information from the student
            requesting a submission for assessment. The dict contains an
            item_id, course_id, and item_type, used to identify the unique
            question for the review, while the student_id is used to explicitly
            avoid giving the student their own submission.
        graded_by (int): The number of assessments a submission
            requires before it has completed the peer assessment process.
        over_grading (bool): Allows over grading to be performed if no submission
            requires assessments. Over grading should only occur if the deadline
            for submissions has passed, but there is still a window for peer
            assessment. Defaults to False.

    Returns:
        dict: A peer submission for assessment. This contains a 'student_item',
            'attempt_number', 'submitted_at', 'created_at', and 'answer' field to be
            used for assessment.

    Raises:
        PeerAssessmentRequestError: Raised when the request parameters are
            invalid for the request.
        PeerAssessmentInternalError: Raised when there is an internal error
            retrieving peer workflow information.
        PeerAssessmentWorkflowError: Raised when an error occurs because this
            function, or the student item, is not in the proper workflow state
            to retrieve a peer submission.

    Examples:
        >>> student_item_dict = dict(
        >>>    item_id="item_1",
        >>>    course_id="course_1",
        >>>    item_type="type_one",
        >>>    student_id="Bob",
        >>> )
        >>> get_submission_to_assess(student_item_dict, 3)
        {
            'student_item': 2,
            'attempt_number': 1,
            'submitted_at': datetime.datetime(2014, 1, 29, 23, 14, 52, 649284, tzinfo=<UTC>),
            'created_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 668850, tzinfo=<UTC>),
            'answer': u'The answer is 42.'
        }
    """
    workflow = _get_latest_workflow(student_item_dict)
    if not workflow:
        raise PeerAssessmentWorkflowError(_(
            u"A Peer Assessment Workflow does not exist for the specified "
            u"student."))
    submission_uuid = _find_active_assessments(workflow)
    # If there is an active assessment for this user, get that submission,
    # otherwise, get the first assessment for review, otherwise, if over grading
    # is turned on, get the first submission available for over grading.
    if submission_uuid is None:
        submission_uuid = _get_submission_for_review(workflow, graded_by)
    if submission_uuid is None and over_grading:
        submission_uuid = _get_submission_for_over_grading(workflow)
    if submission_uuid:
        try:
            submission_data = sub_api.get_submission(submission_uuid)
            _create_peer_workflow_item(workflow, submission_uuid)
            _log_workflow(submission_uuid, student_item_dict, over_grading)
            return submission_data
        except sub_api.SubmissionNotFoundError:
            # BUG FIX (i18n): format *after* translation. The original called
            # .format() on the literal inside _(), so gettext received an
            # already-formatted string whose msgid could never match the
            # message catalog, and the error was never translated.
            error_message = _(
                u"Could not find a submission with the uuid {} for student {} "
                u"in the peer workflow."
            ).format(submission_uuid, student_item_dict)
            logger.exception(error_message)
            raise PeerAssessmentWorkflowError(error_message)
    else:
        logger.info(
            u"No submission found for {} to assess ({}, {})"
            .format(
                student_item_dict["student_id"],
                student_item_dict["course_id"],
                student_item_dict["item_id"],
            )
        )
        return None
开发者ID:jbau,项目名称:edx-ora2,代码行数:95,代码来源:peer_api.py
示例17: render_grade_complete
def render_grade_complete(self, workflow):
    """
    Render the grade complete state.

    Args:
        workflow (dict): The serialized Workflow model.

    Returns:
        tuple of context (dict), template_path (string)
    """
    # Peer specific stuff...
    assessment_steps = self.assessment_steps
    submission_uuid = workflow['submission_uuid']
    # Defaults for steps that aren't configured on this problem.
    example_based_assessment = None
    self_assessment = None
    feedback = None
    peer_assessments = []
    has_submitted_feedback = False
    if "peer-assessment" in assessment_steps:
        feedback = peer_api.get_assessment_feedback(submission_uuid)
        peer_assessments = peer_api.get_assessments(submission_uuid)
        has_submitted_feedback = feedback is not None
    if "self-assessment" in assessment_steps:
        self_assessment = self_api.get_assessment(submission_uuid)
    if "example-based-assessment" in assessment_steps:
        example_based_assessment = ai_api.get_latest_assessment(submission_uuid)
    feedback_text = feedback.get('feedback', '') if feedback else ''
    student_submission = sub_api.get_submission(submission_uuid)
    # We retrieve the score from the workflow, which in turn retrieves
    # the score for our current submission UUID.
    # We look up the score by submission UUID instead of student item
    # to ensure that the score always matches the rubric.
    # It's possible for the score to be `None` even if the workflow status is "done"
    # when all the criteria in the rubric are feedback-only (no options).
    score = workflow['score']
    context = {
        'score': score,
        'feedback_text': feedback_text,
        'student_submission': student_submission,
        'peer_assessments': peer_assessments,
        'self_assessment': self_assessment,
        'example_based_assessment': example_based_assessment,
        'rubric_criteria': self._rubric_criteria_with_feedback(peer_assessments),
        'has_submitted_feedback': has_submitted_feedback,
        'allow_file_upload': self.allow_file_upload,
        'file_url': self.get_download_url_from_submission(student_submission)
    }
    # Update the scores we will display to the user
    # Note that we are updating a *copy* of the rubric criteria stored in
    # the XBlock field
    max_scores = peer_api.get_rubric_max_scores(submission_uuid)
    # Median scores come from the first configured step, in priority order.
    median_scores = None
    if "peer-assessment" in assessment_steps:
        median_scores = peer_api.get_assessment_median_scores(submission_uuid)
    elif "self-assessment" in assessment_steps:
        median_scores = self_api.get_assessment_scores_by_criteria(submission_uuid)
    elif "example-based-assessment" in assessment_steps:
        median_scores = ai_api.get_assessment_scores_by_criteria(submission_uuid)
    if median_scores is not None and max_scores is not None:
        for criterion in context["rubric_criteria"]:
            criterion["median_score"] = median_scores[criterion["name"]]
            criterion["total_value"] = max_scores[criterion["name"]]
    return ('openassessmentblock/grade/oa_grade_complete.html', context)
开发者ID:caesar2164,项目名称:edx-ora2,代码行数:73,代码来源:grade_mixin.py
示例18: render_grade_complete
def render_grade_complete(self, workflow):
    """
    Render the grade complete state.

    Args:
        workflow (dict): The serialized Workflow model.

    Returns:
        tuple of context (dict), template_path (string)
    """
    # Peer specific stuff...
    assessment_steps = self.assessment_steps
    submission_uuid = workflow['submission_uuid']
    # Defaults for steps that aren't configured on this problem.
    example_based_assessment = None
    self_assessment = None
    feedback = None
    peer_assessments = []
    has_submitted_feedback = False
    if "peer-assessment" in assessment_steps:
        feedback = peer_api.get_assessment_feedback(submission_uuid)
        peer_assessments = [
            self._assessment_grade_context(asmnt)
            for asmnt in peer_api.get_assessments(submission_uuid)
        ]
        has_submitted_feedback = feedback is not None
    if "self-assessment" in assessment_steps:
        self_assessment = self._assessment_grade_context(
            self_api.get_assessment(submission_uuid)
        )
    if "example-based-assessment" in assessment_steps:
        example_based_assessment = self._assessment_grade_context(
            ai_api.get_latest_assessment(submission_uuid)
        )
    feedback_text = feedback.get('feedback', '') if feedback else ''
    student_submission = sub_api.get_submission(submission_uuid)
    # We retrieve the score from the workflow, which in turn retrieves
    # the score for our current submission UUID.
    # We look up the score by submission UUID instead of student item
    # to ensure that the score always matches the rubric.
    # It's possible for the score to be `None` even if the workflow status is "done"
    # when all the criteria in the rubric are feedback-only (no options).
    score = workflow['score']
    context = {
        'score': score,
        'feedback_text': feedback_text,
        'student_submission': student_submission,
        'peer_assessments': peer_assessments,
        'self_assessment': self_assessment,
        'example_based_assessment': example_based_assessment,
        'rubric_criteria': self._rubric_criteria_grade_context(peer_assessments, self_assessment),
        'has_submitted_feedback': has_submitted_feedback,
        'allow_file_upload': self.allow_file_upload,
        'allow_latex': self.allow_latex,
        'file_url': self.get_download_url_from_submission(student_submission)
    }
    # Update the scores we will display to the user
    # Note that we are updating a *copy* of the rubric criteria stored in
    # the XBlock field
    max_scores = peer_api.get_rubric_max_scores(submission_uuid)
    # Median scores come from the first configured step, in priority order.
    median_scores = None
    if "peer-assessment" in assessment_steps:
        median_scores = peer_api.get_assessment_median_scores(submission_uuid)
    elif "self-assessment" in assessment_steps:
        median_scores = self_api.get_assessment_scores_by_criteria(submission_uuid)
    elif "example-based-assessment" in assessment_steps:
        median_scores = ai_api.get_assessment_scores_by_criteria(submission_uuid)
    if median_scores is not None and max_scores is not None:
        for criterion in context["rubric_criteria"]:
            # Although we prevent course authors from modifying criteria post-release,
            # it's still possible for assessments created by course staff to
            # have criteria that differ from the current problem definition.
            # It's also possible to circumvent the post-release restriction
            # if course authors directly import a course into Studio.
            # If this happens, we simply leave the score blank so that the grade
            # section can render without error.
            criterion["median_score"] = median_scores.get(criterion["name"], '')
            criterion["total_value"] = max_scores.get(criterion["name"], '')
    return ('openassessmentblock/grade/oa_grade_complete.html', context)
开发者ID:robertgerinlajoie,项目名称:edx-ora2,代码行数:88,代码来源:grade_mixin.py
示例19: self_path_and_context
def self_path_and_context(self):
"""
Determine the template path and context to use when rendering the self-assessment step.
Returns:
tuple of `(path, context)`, where `path` (str) is the path to the template,
and `context` (dict) is the template context.
Raises:
SubmissionError: Error occurred while retrieving the current submission.
SelfAssessmentRequestError: Error occurred while checking if we had a self-assessment.
"""
# Import is placed here to avoid model import at project startup.
from submissions import api as submission_api
path = 'openassessmentblock/self/oa_self_unavailable.html'
problem_closed, reason, start_date, due_date = self.is_closed(step="self-assessment")
user_preferences = get_user_preferences(self.runtime.service(self, 'user'))
context = {
'allow_latex': self.allow_latex,
'prompts_type': self.prompts_type,
"xblock_id": self.get_xblock_id(),
'user_timezone': user_preferences['user_timezone'],
'user_language': user_preferences['user_language']
}
# We display the due date whether the problem is open or closed.
# If no date is set, it defaults to the d
请发表评论