diff --git a/bin/relate b/bin/relate index c9468306a..58699b063 100644 --- a/bin/relate +++ b/bin/relate @@ -52,7 +52,8 @@ def expand_yaml(yml_file, repo_root): def test_code_question(page_desc, repo_root): if page_desc.type not in [ "PythonCodeQuestion", - "PythonCodeQuestionWithHumanTextFeedback"]: + "PythonCodeQuestionWithHumanTextFeedback", + "OctaveCodeQuestion"]: return print(75*"-") @@ -64,11 +65,24 @@ def test_code_question(page_desc, repo_root): correct_code = getattr(page_desc, "correct_code", "") - from course.page.code_run_backend import \ - substitute_correct_code_into_test_code + if page_desc.type in [ + "PythonCodeQuestion", + "PythonCodeQuestionWithHumanTextFeedback" ]: + from course.page.code_run_backend_python import \ + substitute_correct_code_into_test_code + elif page_desc.type in [ + "OctaveCodeQuestion" ]: + from course.page.code_run_backend_octave import \ + substitute_correct_code_into_test_code test_code = substitute_correct_code_into_test_code(test_code, correct_code) - from course.page.code_run_backend import run_code, package_exception + if page_desc.type in [ + "PythonCodeQuestion", + "PythonCodeQuestionWithHumanTextFeedback" ]: + from course.page.code_run_backend_python import run_code, package_exception + elif page_desc.type in [ + "OctaveCodeQuestion" ]: + from course.page.code_run_backend_octave import run_code, package_exception data_files = {} diff --git a/course/page/__init__.py b/course/page/__init__.py index e268f9d69..39c9ce5ce 100644 --- a/course/page/__init__.py +++ b/course/page/__init__.py @@ -37,6 +37,8 @@ ChoiceQuestion, MultipleChoiceQuestion, SurveyChoiceQuestion) from course.page.code import ( PythonCodeQuestion, PythonCodeQuestionWithHumanTextFeedback) +from course.page.code import ( + OctaveCodeQuestion) from course.page.upload import FileUploadQuestion __all__ = ( diff --git a/course/page/code.py b/course/page/code.py index 8fe25bdbe..43dd0206a 100644 --- a/course/page/code.py +++ b/course/page/code.py @@ -110,8 +110,8 @@ def debug_print(s): command_path = '/opt/runcode/runcode' user = 'runcode' - # The following is necessary because tests don't arise from a CodeQuestion - # object, so we provide a fallback. + # The following is necessary because some tests don't arise from a + # CodeQuestion object, so we provide a fallback. debug_print('Image is %s.' % repr(image)) if image is None: image = settings.RELATE_DOCKER_RUNPY_IMAGE @@ -230,7 +230,7 @@ def check_timeout(): start_time = time() debug_print("BEFPOST") - connection.request('POST', '/run-python', json_run_req, headers) + connection.request('POST', '/run-code', json_run_req, headers) debug_print("AFTPOST") http_response = connection.getresponse() @@ -577,6 +577,8 @@ def answer_data(self, page_context, page_data, form, files_data): return {"answer": form.cleaned_data["answer"].strip()} def get_test_code(self): + # Note to developers: this function should be replaced in daughter + # classes as it defaults to Python. It is required by grade() though. 
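+        # (OctaveCodeQuestion below overrides it to draw the substitution
+        # helper from code_run_backend_octave instead.)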
         test_code = getattr(self.page_desc, "test_code", None)
         if test_code is None:
             return test_code
@@ -585,7 +587,7 @@ def get_test_code(self):
         if correct_code is None:
             correct_code = ""
 
-        from .code_run_backend import substitute_correct_code_into_test_code
+        from .code_run_backend_python import substitute_correct_code_into_test_code
         return substitute_correct_code_into_test_code(test_code, correct_code)
 
     def grade(self, page_context, page_data, answer_data, grade_data):
@@ -1167,6 +1169,18 @@ def __init__(self, vctx, location, page_desc, language_mode='python'):
         super(PythonCodeQuestion, self).__init__(vctx, location,
                 page_desc, language_mode)
 
+    def get_test_code(self):
+        test_code = getattr(self.page_desc, "test_code", None)
+        if test_code is None:
+            return test_code
+
+        correct_code = getattr(self.page_desc, "correct_code", None)
+        if correct_code is None:
+            correct_code = ""
+
+        from .code_run_backend_python import substitute_correct_code_into_test_code
+        return substitute_correct_code_into_test_code(test_code, correct_code)
+
+
 # }}}
 
@@ -1372,4 +1386,214 @@ def grade(self, page_context, page_data, answer_data, grade_data):
 
 # }}}
 
+
+# {{{ octave code question
+
+class OctaveCodeQuestion(CodeQuestion):
+    """
+    An auto-graded question allowing an answer consisting of Octave code.
+    User code, :attr:`setup_code`, and :attr:`correct_code` are written in
+    Octave 4.2+. Note that :attr:`test_code` is currently written in Python
+    and operates on values pulled back from the Octave session.
+
+    If you are not including the
+    :attr:`course.constants.flow_permission.change_answer`
+    permission for your entire flow, you likely want to
+    include this snippet in your question definition:
+
+    .. code-block:: yaml
+
+        access_rules:
+            add_permissions:
+                - change_answer
+
+    This will allow participants multiple attempts at getting
+    the right answer.
+
+    .. attribute:: id
+
+        |id-page-attr|
+
+    .. attribute:: type
+
+        ``OctaveCodeQuestion``
+
+    .. attribute:: is_optional_page
+
+        |is-optional-page-attr|
+
+    .. attribute:: access_rules
+
+        |access-rules-page-attr|
+
+    .. attribute:: title
+
+        |title-page-attr|
+
+    .. attribute:: value
+
+        |value-page-attr|
+
+    .. attribute:: prompt
+
+        The page's prompt, written in :ref:`markup`.
+
+    .. attribute:: timeout
+
+        A number, giving the number of seconds for which setup code,
+        the given answer code, and the test code (combined) will be
+        allowed to run.
+
+    .. attribute:: setup_code
+
+        Optional.
+        Octave code to prepare the environment for the participant's
+        answer.
+
+    .. attribute:: show_setup_code
+
+        Optional. ``True`` or ``False``. If true, the :attr:`setup_code`
+        will be shown to the participant.
+
+    .. attribute:: names_for_user
+
+        Optional.
+        Symbols defined at the end of the :attr:`setup_code` that will be
+        made available to the participant's code.
+
+        A deep copy (using the standard library function :func:`copy.deepcopy`)
+        of these values is made, to prevent the user from modifying trusted
+        state of the grading code.
+
+    .. attribute:: names_from_user
+
+        Optional.
+        Symbols that the participant's code is expected to define.
+        These will be made available to the :attr:`test_code`.
+        Some remapping of types will be made between Octave and Python classes.
+
+    .. attribute:: test_code
+
+        Optional.
+        Code that will be run to determine the correctness of a
+        student-provided solution. Will have access to variables in
+        :attr:`names_from_user` (which will be *None* if not provided). Should
+        never raise an exception.
+
+        This may contain the marker "###CORRECT_CODE###", which will
+        be replaced with the contents of :attr:`correct_code`, with
+        each line indented to the same depth as where the marker
+        is found. The line with this marker is only allowed to have
+        white space and the marker on it.
+
+    .. attribute:: show_test_code
+
+        Optional. ``True`` or ``False``. If true, the :attr:`test_code`
+        will be shown to the participant.
+
+    .. attribute:: correct_code_explanation
+
+        Optional.
+        Code that is revealed when answers are visible
+        (see :ref:`flow-permissions`). This is shown before
+        :attr:`correct_code` as an explanation.
+
+    .. attribute:: correct_code
+
+        Optional.
+        Code that is revealed when answers are visible
+        (see :ref:`flow-permissions`).
+
+    .. attribute:: initial_code
+
+        Optional.
+        Code present in the code input field when the participant first starts
+        working on their solution.
+
+    .. attribute:: data_files
+
+        Optional.
+        A list of file names in the :ref:`git-repo` whose contents will be made
+        available to :attr:`setup_code` and :attr:`test_code` through the
+        ``data_files`` dictionary. (see below)
+
+    .. attribute:: single_submission
+
+        Optional, a Boolean. If the question does not allow multiple submissions
+        based on its :attr:`access_rules` (not the ones of the flow), a warning
+        is shown. Setting this attribute to True will silence the warning.
+
+    The following symbols are available in :attr:`test_code` (since
+    :attr:`setup_code` runs inside Octave, they are *not* available there):
+
+    * ``GradingComplete``: An exception class that can be raised to indicate
+      that the grading code has concluded.
+
+    * ``feedback``: A class instance with the following interface::
+
+          feedback.set_points(0.5) # 0<=points<=1 (usually)
+          feedback.add_feedback("This was wrong")
+
+          # combines the above two and raises GradingComplete
+          feedback.finish(0, "This was wrong")
+
+          feedback.check_numpy_array_sanity(name, num_axes, data)
+
+          feedback.check_numpy_array_features(name, ref, data, report_failure=True)
+
+          feedback.check_numpy_array_allclose(name, ref, data,
+                  accuracy_critical=True, rtol=1e-5, atol=1e-8,
+                  report_success=True, report_failure=True)
+              # If report_failure is True, this function will only return
+              # if *data* passes the tests. It will return *True* in this
+              # case.
+              #
+              # If report_failure is False, this function will always return,
+              # and the return value will indicate whether *data* passed the
+              # accuracy/shape/kind checks.
+
+          feedback.check_list(name, ref, data, entry_type=None)
+
+          feedback.check_scalar(name, ref, data, accuracy_critical=True,
+                  rtol=1e-5, atol=1e-8, report_success=True, report_failure=True)
+              # returns True if accurate
+
+          feedback.call_user(f, *args, **kwargs)
+              # Calls a user-supplied function and prints an appropriate
+              # feedback message in case of failure.
+
+    * ``data_files``: A dictionary mapping file names from :attr:`data_files`
+      to :class:`bytes` instances with that file's contents.
+
+    * ``user_code``: The user code being tested, as a string.
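+
+    As an illustration (adapted from the test fixtures added later in this
+    patch; the variable names are examples only), a :attr:`test_code` block
+    checking a scalar ``c`` pulled back from the Octave session might read:
+
+    .. code-block:: python
+
+        if not isinstance(c, float):
+            feedback.finish(0, "Your computed c is not a float.")
+
+        correct_c = 3
+        rel_err = abs(correct_c - c)/abs(correct_c)
+
+        if rel_err < 1e-7:
+            feedback.finish(1, "Your computed c was correct.")
+        else:
+            feedback.finish(0, "Your computed c was incorrect.")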
+ """ + + @property + def language_mode(self): + return 'octave' + + @property + def container_image(self): + return settings.RELATE_DOCKER_RUNOCTAVE_IMAGE + + @property + def suffix(self): + return '.m' + + def __init__(self, vctx, location, page_desc, language_mode='octave'): + super(OctaveCodeQuestion, self).__init__(vctx, location, + page_desc, language_mode) + + def get_test_code(self): + test_code = getattr(self.page_desc, "test_code", None) + if test_code is None: + return test_code + + correct_code = getattr(self.page_desc, "correct_code", None) + if correct_code is None: + correct_code = "" + + from .code_run_backend_octave import substitute_correct_code_into_test_code + return substitute_correct_code_into_test_code(test_code, correct_code) + +# }}} + # vim: foldmethod=marker diff --git a/course/page/code_run_backend_octave.py b/course/page/code_run_backend_octave.py new file mode 100644 index 000000000..2ab51c6ae --- /dev/null +++ b/course/page/code_run_backend_octave.py @@ -0,0 +1,310 @@ +# -*- coding: utf-8 -*- + +from __future__ import absolute_import + +__copyright__ = "Copyright (C) 2014 Andreas Kloeckner" + +__license__ = """ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +""" + +import sys +import traceback + +try: + from .code_feedback import Feedback, GradingComplete +except SystemError: + from code_feedback import Feedback, GradingComplete # type: ignore +except ImportError: + from code_feedback import Feedback, GradingComplete # type: ignore + + +__doc__ = """ +PROTOCOL +======== + +.. class:: Request + + .. attribute:: setup_code + + .. attribute:: names_for_user + + .. attribute:: user_code + + .. attribute:: names_from_user + + .. attribute:: test_code + + .. attribute:: data_files + + A dictionary from data file names to their + base64-cencoded contents. + Optional. + + .. attribute:: compile_only + + :class:`bool` + +.. class Response:: + .. attribute:: result + + One of + + * ``success`` + * ``timeout`` + * ``uncaught_error`` + * ``setup_compile_error`` + * ``setup_error``, + * ``user_compile_error`` + * ``user_error`` + * ``test_compile_error`` + * ``test_error`` + + Always present. + + .. attribute:: message + + Optional. + + .. attribute:: traceback + + Optional. + + .. attribute:: stdout + + Whatever came out of stdout. + + Optional. + + .. attribute:: stderr + + Whatever came out of stderr. + + Optional. + + .. attribute:: figures + + A list of ``(index, mime_type, string)``, where *string* is a + base64-encoded representation of the figure. 
+        *index* will usually correspond to the Octave figure number.
+
+        Optional.
+
+    .. attribute:: html
+
+        A list of HTML strings generated. These are aggressively sanitized
+        before being rendered.
+
+    .. attribute:: points
+
+        A number between 0 and 1 (inclusive).
+
+        Present on ``success`` if :attr:`Request.compile_only` is *False*.
+
+    .. attribute:: feedback
+
+        A list of strings.
+
+        Present on ``success`` if :attr:`Request.compile_only` is *False*.
+"""
+
+
+# {{{ tools
+
+class Struct(object):
+    def __init__(self, entries):
+        for name, val in entries.items():
+            self.__dict__[name] = val
+
+    def __repr__(self):
+        return repr(self.__dict__)
+
+# }}}
+
+
+def substitute_correct_code_into_test_code(test_code, correct_code):
+    import re
+    CORRECT_CODE_TAG = re.compile(r"^(\s*)###CORRECT_CODE###\s*$")  # noqa
+
+    new_test_code_lines = []
+    for l in test_code.split("\n"):
+        match = CORRECT_CODE_TAG.match(l)
+        if match is not None:
+            prefix = match.group(1)
+            for cc_l in correct_code.split("\n"):
+                new_test_code_lines.append(prefix+cc_l)
+        else:
+            new_test_code_lines.append(l)
+
+    return "\n".join(new_test_code_lines)
+
+
+def package_exception(result, what):
+    tp, val, tb = sys.exc_info()
+    result["result"] = what
+    result["message"] = "%s: %s" % (tp.__name__, str(val))
+    result["traceback"] = "".join(
+            traceback.format_exception(tp, val, tb))
+
+
+def run_code(result, run_req):
+    # {{{ set up octave process
+
+    import oct2py
+
+    oc = oct2py.Oct2Py()
+
+    # }}}
+
+    # {{{ run code
+
+    data_files = {}
+    if hasattr(run_req, "data_files"):
+        from base64 import b64decode
+        for name, contents in run_req.data_files.items():
+            # This part "cheats" a little, since Octave lets us evaluate
+            # functions in the same context as the main code.
+            # (MATLAB segregates these.)
+            #
+            # Alternatively, one could use octave.addpath('/path/to/') first.
+            data_files[name] = b64decode(contents.encode())
+            oc.eval(b64decode(contents.encode()).decode("utf-8"))
+
+    generated_html = []
+    result["html"] = generated_html
+
+    def output_html(s):
+        generated_html.append(s)
+
+    feedback = Feedback()
+    maint_ctx = {
+        "feedback": feedback,
+        "user_code": run_req.user_code,
+        "data_files": data_files,
+        "output_html": output_html,
+        "GradingComplete": GradingComplete,
+    }
+
+    if hasattr(run_req, "setup_code") and run_req.setup_code is not None:
+        try:
+            oc.eval(run_req.setup_code)
+        except Exception:
+            package_exception(result, "setup_error")
+            return
+
+    user_ctx = {}
+    if hasattr(run_req, "names_for_user"):
+        # parse the output of 'whos' to find the Octave variables that the
+        # setup code defined (data rows of the 'whos' table are indented)
+        ctx_lines = []
+        oc.eval('whos', stream_handler=ctx_lines.append)
+        ctx_vars = []
+        for line in ctx_lines:
+            if ' '*8 in line:
+                line_data = line.split()
+                if line_data[0] in ('Attr', '===='):
+                    continue
+                ctx_vars.append(line_data[0])
+
+        for name in run_req.names_for_user:
+            if name not in ctx_vars:
+                result["result"] = "setup_error"
+                result["message"] = "Setup code did not define '%s'." % name
+                return
+
+            user_ctx[name] = oc.pull(name)
+
+        from copy import deepcopy
+        user_ctx = deepcopy(user_ctx)
+        '''
+        for name in user_ctx:
+            oc.push(name, user_ctx[name])
+        '''
+
+    try:
+        oc.eval(run_req.user_code, plot_dir='figures',
+                plot_name='octave', plot_format='png')
+    except Exception:
+        package_exception(result, "user_error")
+        return
+
+    # {{{ export plots
+
+    '''
+    # XXX TODO implement using oct2py.extract_figures
+    if "matplotlib" in sys.modules:
+        import matplotlib.pyplot as pt
+        from io import BytesIO
+        from base64 import b64encode
+
+        format = "png"
+        mime = "image/png"
+        figures = []
+
+        for fignum in pt.get_fignums():
+            pt.figure(fignum)
+            bio = BytesIO()
+            try:
+                pt.savefig(bio, format=format)
+            except Exception:
+                pass
+            else:
+                figures.append(
+                    (fignum, mime, b64encode(bio.getvalue()).decode()))
+
+        result["figures"] = figures
+    '''
+    # }}}
+
+    if hasattr(run_req, "names_from_user"):
+        # also make the setup-provided names available to the (Python)
+        # test code
+        for name in run_req.names_for_user:
+            try:
+                maint_ctx[name] = oc.pull(name)
+            except oct2py.Oct2PyError:
+                feedback.add_feedback(
+                        "Setup variable '%s' is not defined."
+                        % name)
+                maint_ctx[name] = None
+
+        for name in run_req.names_from_user:
+            try:
+                maint_ctx[name] = oc.pull(name)
+            except oct2py.Oct2PyError:
+                feedback.add_feedback(
+                        "Required answer variable '%s' is not defined."
+                        % name)
+                maint_ctx[name] = None
+
+    if run_req.test_code is not None:  # XXX test code is written in Python
+        try:
+            maint_ctx["_MODULE_SOURCE_CODE"] = run_req.test_code
+            exec(run_req.test_code, maint_ctx)
+        except GradingComplete:
+            pass
+        except Exception:
+            package_exception(result, "test_error")
+            return
+
+    result["points"] = feedback.points
+    result["feedback"] = feedback.feedback_items
+
+    # }}}
+
+    result["result"] = "success"
+
+# vim: foldmethod=marker
diff --git a/course/page/code_run_backend.py b/course/page/code_run_backend_python.py
similarity index 100%
rename from course/page/code_run_backend.py
rename to course/page/code_run_backend_python.py
diff --git a/docker-image-run-octave/Dockerfile b/docker-image-run-octave/Dockerfile
new file mode 100644
index 000000000..d4abfe8f3
--- /dev/null
+++ b/docker-image-run-octave/Dockerfile
@@ -0,0 +1,61 @@
+FROM inducer/debian-amd64-minbase
+MAINTAINER Neal Davis
+EXPOSE 9941
+RUN useradd runcode
+
+# Based on `compdatasci/octave-desktop` Docker image
+ARG OCTAVE_VERSION=5.1.0
+
+# Install system packages and Octave
+RUN apt-get update
+RUN apt-get install -y --no-install-recommends \
+    wget \
+    curl \
+    build-essential \
+    gfortran \
+    cmake \
+    libarchive-tools \
+    rsync \
+    imagemagick \
+    \
+    gnuplot-x11 \
+    libopenblas-base
+RUN apt-get install -y --no-install-recommends \
+    octave \
+    liboctave-dev \
+    octave-info \
+    octave-symbolic \
+    octave-parallel \
+    octave-struct \
+    octave-statistics
+RUN apt-get install -y --no-install-recommends \
+    python3-dev \
+    python3-setuptools \
+    python3-pip \
+    python3-numpy \
+    python3-scipy \
+    python3-matplotlib \
+    pandoc \
+    ttf-dejavu
+RUN apt-get clean && \
+    apt-get autoremove && \
+    rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
+
+RUN apt-get update
+RUN pip3 install --upgrade pip
+RUN pip3 install oct2py
+
+RUN apt-get clean
+RUN fc-cache
+
+RUN mkdir -p /opt/runcode
+ADD runcode /opt/runcode/
+COPY code_feedback.py /opt/runcode/
+COPY code_run_backend.py /opt/runcode/
+
+# currently no graphics support
+#TODO
+
+RUN rm -Rf /root/.cache
+
+# may use ./flatten-container.sh to reduce disk space
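A quick smoke test of a container built from this image (a sketch only; it
assumes the container is running with port 9941 published to localhost) is to
hit the ``/ping`` handler served by the ``runcode`` script below, which should
answer ``OK``:

    import http.client

    # the run server listens on the port EXPOSEd above
    conn = http.client.HTTPConnection("localhost", 9941)
    conn.request("GET", "/ping")
    print(conn.getresponse().read())  # expected: b"OK"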
diff --git a/docker-image-run-octave/docker-build.sh b/docker-image-run-octave/docker-build.sh
new file mode 100755
index 000000000..0ed4e6ad7
--- /dev/null
+++ b/docker-image-run-octave/docker-build.sh
@@ -0,0 +1,5 @@
+#! /bin/sh
+cp ../course/page/code_feedback.py .
+cp ../course/page/code_run_backend_octave.py code_run_backend.py
+docker build --no-cache . -t davis68/relate-runcode-octave
+rm code_feedback.py code_run_backend.py
diff --git a/docker-image-run-octave/flatten-container.sh b/docker-image-run-octave/flatten-container.sh
new file mode 100755
index 000000000..d18bb6434
--- /dev/null
+++ b/docker-image-run-octave/flatten-container.sh
@@ -0,0 +1,12 @@
+#! /bin/bash
+
+if test "$1" = ""; then
+    echo "$0 imagename"
+    exit 1
+fi
+CONTAINER=$(docker create "$1")
+docker export "$CONTAINER" | \
+    docker import \
+    -c "EXPOSE 9941" \
+    -
+docker rm -f $CONTAINER
diff --git a/docker-image-run-octave/runcode b/docker-image-run-octave/runcode
new file mode 100755
index 000000000..0df791dbd
--- /dev/null
+++ b/docker-image-run-octave/runcode
@@ -0,0 +1,156 @@
+#! /usr/bin/env python3
+
+# placate flake8
+from __future__ import print_function
+
+__copyright__ = "Copyright (C) 2014 Andreas Kloeckner"
+
+__license__ = """
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+"""
+
+import socketserver
+import json
+import sys
+import io
+try:
+    from code_run_backend import Struct, run_code, package_exception
+except ImportError:
+    try:
+        # When faking a container for unittest
+        from course.page.code_run_backend_octave import (
+            Struct, run_code, package_exception)
+    except ImportError:
+        # When debugging, i.e., run "python runcode" command line
+        import os
+        sys.path.insert(0, os.path.abspath(
+            os.path.join(os.path.dirname(__file__), os.pardir)))
+        from course.page.code_run_backend_octave import (
+            Struct, run_code, package_exception)
+
+from http.server import BaseHTTPRequestHandler
+
+PORT = 9941
+OUTPUT_LENGTH_LIMIT = 16*1024
+
+TEST_COUNT = 0
+
+
+def truncate_if_long(s):
+    if len(s) > OUTPUT_LENGTH_LIMIT:
+        s = s[:OUTPUT_LENGTH_LIMIT] + "[TRUNCATED...
TOO MUCH OUTPUT]" + return s + + +class RunRequestHandler(BaseHTTPRequestHandler): + def do_GET(self): + print("GET RECEIVED", file=sys.stderr) + if self.path != "/ping": + raise RuntimeError("unrecognized path in GET") + + self.send_response(200) + self.send_header("Content-type", "text/plain") + self.end_headers() + + self.wfile.write(b"OK") + print("PING RESPONSE DONE", file=sys.stderr) + + def do_POST(self): + global TEST_COUNT + TEST_COUNT += 1 + + response = {} + + prev_stdout = sys.stdout # noqa + prev_stderr = sys.stderr # noqa + + try: + print("POST RECEIVED", file=prev_stderr) + if self.path != "/run-code": + raise RuntimeError("unrecognized path in POST") + + clength = int(self.headers['content-length']) + recv_data = self.rfile.read(clength) + + print("RUNOCTAVE RECEIVED %d bytes" % len(recv_data), + file=prev_stderr) + run_req = Struct(json.loads(recv_data.decode("utf-8"))) + print("REQUEST: %r" % run_req, file=prev_stderr) + + stdout = io.StringIO() + stderr = io.StringIO() + + sys.stdin = None + sys.stdout = stdout + sys.stderr = stderr + + run_code(response, run_req) + + response["stdout"] = truncate_if_long(stdout.getvalue()) + response["stderr"] = truncate_if_long(stderr.getvalue()) + + print("REQUEST SERVICED: %r" % response, file=prev_stderr) + + json_result = json.dumps(response).encode("utf-8") + + self.send_response(200) + self.send_header("Content-type", "application/json") + self.end_headers() + + print("WRITING RESPONSE", file=prev_stderr) + self.wfile.write(json_result) + print("WROTE RESPONSE", file=prev_stderr) + except: + print("ERROR RESPONSE", file=prev_stderr) + response = {} + package_exception(response, "uncaught_error") + json_result = json.dumps(response).encode("utf-8") + + self.send_response(500) + self.send_header("Content-type", "application/json") + self.end_headers() + + self.wfile.write(json_result) + finally: + sys.stdout = prev_stdout + sys.stderr = prev_stderr + + +def main(): + print("STARTING, LISTENING ON %d" % PORT, file=sys.stderr) + server = socketserver.TCPServer(("", PORT), RunRequestHandler) + + serve_single_test = len(sys.argv) > 1 and sys.argv[1] == "-1" + + while True: + server.handle_request() + print("SERVED REQUEST", file=sys.stderr) + if TEST_COUNT > 0 and serve_single_test: + break + + server.server_close() + print("FINISHED server_close()", file=sys.stderr) + + print("EXITING", file=sys.stderr) + + +if __name__ == "__main__": + main() + +# vim: foldmethod=marker diff --git a/docker-image-run-py/docker-build.sh b/docker-image-run-py/docker-build.sh index a53f0528f..e20b1ded2 100755 --- a/docker-image-run-py/docker-build.sh +++ b/docker-image-run-py/docker-build.sh @@ -1,5 +1,5 @@ #! /bin/sh cp ../course/page/code_feedback.py . -cp ../course/page/code_run_backend.py . +cp ../course/page/code_run_backend_python.py code_run_backend.py docker build --no-cache . 
-t inducer/relate-runcode-python
 rm code_feedback.py code_run_backend.py
diff --git a/docker-image-run-py/runcode b/docker-image-run-py/runcode
index 87623061b..ea06a70b3 100755
--- a/docker-image-run-py/runcode
+++ b/docker-image-run-py/runcode
@@ -82,7 +82,7 @@ class RunRequestHandler(BaseHTTPRequestHandler):
 
         try:
             print("POST RECEIVED", file=prev_stderr)
-            if self.path != "/run-python":
+            if self.path != "/run-code":
                 raise RuntimeError("unrecognized path in POST")
 
             clength = int(self.headers['content-length'])
diff --git a/setup.cfg b/setup.cfg
index 7cf94478b..cd1a3ad6a 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -19,7 +19,8 @@ omit =
     setup.py
     local_settings_example.py
    course/page/code_feedback.py
-    course/page/code_run_backend.py
+    course/page/code_run_backend_python.py
+    course/page/code_run_backend_octave.py
    */wsgi.py
    */tests/*
    */tests.py
diff --git a/tests/base_test_mixins.py b/tests/base_test_mixins.py
index f3ec3523c..4de2244b9 100644
--- a/tests/base_test_mixins.py
+++ b/tests/base_test_mixins.py
@@ -2508,6 +2508,65 @@ def tearDownClass(cls):  # noqa
             cls.faked_container_process.kill()
 
 
+class SubprocessRunOctaveContainerMixin(object):
+    """
+    This mixin is used to fake a runoctave container; it is only needed when
+    the TestCase includes test(s) for code questions.
+    """
+    @classmethod
+    def setUpClass(cls):  # noqa
+        if not may_run_expensive_tests():
+            from unittest import SkipTest
+            raise SkipTest(SKIP_EXPENSIVE_TESTS_REASON)
+
+        super(SubprocessRunOctaveContainerMixin, cls).setUpClass()
+
+        octave_executable = os.getenv("OCTAVE_EXECUTABLE")
+
+        if not octave_executable:
+            import subprocess
+            ps = subprocess.run(["which", "octave-cli"], capture_output=True)
+            octave_executable = ps.stdout.strip().decode()
+
+        # The runcode script itself is Python; it reaches Octave through
+        # oct2py, which honors the OCTAVE_EXECUTABLE environment variable.
+        env = dict(os.environ)
+        if octave_executable:
+            env["OCTAVE_EXECUTABLE"] = octave_executable
+
+        import subprocess
+        args = [sys.executable,
+                os.path.abspath(
+                    os.path.join(
+                        os.path.dirname(__file__), os.pardir,
+                        "docker-image-run-octave", "runcode")),
+                ]
+        cls.faked_container_process = subprocess.Popen(
+            args,
+            stdout=subprocess.DEVNULL,
+
+            # because runcode prints to stderr
+            stderr=subprocess.DEVNULL,
+            env=env
+        )
+
+    def setUp(self):
+        super(SubprocessRunOctaveContainerMixin, self).setUp()
+        self.faked_container_patch = mock.patch(
+            "course.page.code.SPAWN_CONTAINERS", False)
+        self.faked_container_patch.start()
+        self.addCleanup(self.faked_container_patch.stop)
+
+    @classmethod
+    def tearDownClass(cls):  # noqa
+        super(SubprocessRunOctaveContainerMixin, cls).tearDownClass()
+
+        from course.page.code import SPAWN_CONTAINERS
+        # Make sure SPAWN_CONTAINERS is reset to True
+        assert SPAWN_CONTAINERS
+        if sys.platform.startswith("win"):
+            # Without this, tests on Appveyor hung after all tests finished.
+            # On *nix platforms, however, killing the subprocess here caused
+            # test failures when more than one TestCase used this mixin, so
+            # there we leave it running; it has no bad side effects on the
+            # remaining tests.
+            cls.faked_container_process.kill()
+
+
 def improperly_configured_cache_patch():
     # can be used as context manager or decorator
     if six.PY3:
diff --git a/tests/test_pages/markdowns.py b/tests/test_pages/markdowns.py
index dc0039d84..e034cd11c 100644
--- a/tests/test_pages/markdowns.py
+++ b/tests/test_pages/markdowns.py
@@ -9,7 +9,7 @@
 
 # {{{ code questions
 
-CODE_MARKDWON = """
+PYTHON_CODE_MARKDOWN = """
 type: PythonCodeQuestion
 access_rules:
     add_permissions:
@@ -45,7 +45,7 @@
 
 correct_code_explanation: This is the [explanation](http://example.com/1).
""" -CODE_MARKDWON_PATTERN_WITH_DATAFILES = """ +PYTHON_CODE_MARKDOWN_PATTERN_WITH_DATAFILES = """ type: PythonCodeQuestion id: addition value: 1 @@ -84,7 +84,7 @@ c = a + b """ -CODE_MARKDWON_WITH_DATAFILES_BAD_FORMAT = """ +PYTHON_CODE_MARKDOWN_WITH_DATAFILES_BAD_FORMAT = """ type: PythonCodeQuestion id: addition value: 1 @@ -124,8 +124,7 @@ c = a + b """ - -CODE_MARKDWON_NOT_EXPLICITLY_NOT_ALLOW_MULTI_SUBMIT1 = """ +PYTHON_CODE_MARKDOWN_NOT_EXPLICITLY_NOT_ALLOW_MULTI_SUBMIT1 = """ type: PythonCodeQuestion access_rules: add_permissions: @@ -164,7 +163,7 @@ c = a + b """ -CODE_MARKDWON_NOT_EXPLICITLY_NOT_ALLOW_MULTI_SUBMIT2 = """ +PYTHON_CODE_MARKDOWN_NOT_EXPLICITLY_NOT_ALLOW_MULTI_SUBMIT2 = """ type: PythonCodeQuestion access_rules: remove_permissions: @@ -203,7 +202,7 @@ c = a + b """ -CODE_MARKDWON_PATTERN_EXPLICITLY_NOT_ALLOW_MULTI_SUBMIT = """ +PYTHON_CODE_MARKDOWN_PATTERN_EXPLICITLY_NOT_ALLOW_MULTI_SUBMIT = """ type: PythonCodeQuestion id: addition value: 1 @@ -239,7 +238,7 @@ c = a + b """ -CODE_MARKDWON_PATTERN_WITHOUT_TEST_CODE = """ +PYTHON_CODE_MARKDOWN_PATTERN_WITHOUT_TEST_CODE = """ type: PythonCodeQuestion id: addition value: 1 @@ -263,7 +262,7 @@ c = a + b """ -CODE_MARKDWON_PATTERN_WITHOUT_CORRECT_CODE = """ +PYTHON_CODE_MARKDOWN_PATTERN_WITHOUT_CORRECT_CODE = """ type: PythonCodeQuestion id: addition value: 1 @@ -297,7 +296,7 @@ """ -FEEDBACK_POINTS_CODE_MARKDWON_PATTERN = """ +PYTHON_FEEDBACK_POINTS_CODE_MARKDOWN_PATTERN = """ type: PythonCodeQuestion id: addition value: 1 @@ -333,7 +332,7 @@ c = a + b """ # noqa -FEEDBACK_POINTS_CODE_MARKDWON_PATTERN = """ +PYTHON_FEEDBACK_POINTS_CODE_MARKDOWN_PATTERN = """ type: PythonCodeQuestion id: addition value: 1 @@ -369,7 +368,7 @@ c = a + b """ # noqa -CODE_WITH_HUMAN_FEEDBACK_MARKDWON_PATTERN = """ +PYTHON_CODE_WITH_HUMAN_FEEDBACK_MARKDOWN_PATTERN = """ type: PythonCodeQuestionWithHumanTextFeedback id: pymult access_rules: @@ -418,4 +417,356 @@ # }}} +# {{{ octave code questions + +OCTAVE_CODE_MARKDOWN = """ +type: OctaveCodeQuestion +access_rules: + add_permissions: + - change_answer +id: addition +value: 1 +timeout: 10 +prompt: | + + # Adding 1 and 2, and assign it to c + +names_from_user: [c] + +initial_code: | + c = + +test_code: | + if not isinstance(c, float): + feedback.finish(0, "Your computed c is not a float.") + + correct_c = 3 + rel_err = abs(correct_c-c)/abs(correct_c) + + if rel_err < 1e-7: + feedback.finish(1, "Your computed c was correct.") + else: + feedback.finish(0, "Your computed c was incorrect.") + +correct_code: | + + c = 2 + 1 + +correct_code_explanation: This is the [explanation](http://example.com/1). 
+""" + +OCTAVE_CODE_MARKDOWN_PATTERN_WITH_DATAFILES = """ +type: OctaveCodeQuestion +id: addition +value: 1 +timeout: 10 +data_files: + %(extra_data_file)s +prompt: | + + # Adding two numbers in Octave + +setup_code: | + pkg load statistics; + a = unifrnd(-10,10); + b = unifrnd(-10,10); + +names_for_user: [a, b] + +names_from_user: [c] + +test_code: | + if not isinstance(c, float): + feedback.finish(0, "Your computed c is not a float.") + + correct_c = a + b + rel_err = abs(correct_c-c)/abs(correct_c) + + if rel_err < 1e-7: + feedback.finish(1, "Your computed c was correct.") + else: + feedback.finish(0, "Your computed c was incorrect.") + +correct_code: | + + c = a + b +""" + +OCTAVE_CODE_MARKDOWN_WITH_DATAFILES_BAD_FORMAT = """ +type: OctaveCodeQuestion +id: addition +value: 1 +timeout: 10 +data_files: + - - foo + - bar +prompt: | + + # Adding two numbers in Octave + +setup_code: | + pkg load statistics; + a = unifrnd(-10,10); + b = unifrnd(-10,10); + +names_for_user: [a, b] + +names_from_user: [c] + +test_code: | + if not isinstance(c, float): + feedback.finish(0, "Your computed c is not a float.") + + correct_c = a + b + rel_err = abs(correct_c-c)/abs(correct_c) + + if rel_err < 1e-7: + feedback.finish(1, "Your computed c was correct.") + else: + feedback.finish(0, "Your computed c was incorrect.") + +correct_code: | + + c = a + b +""" + +OCTAVE_CODE_MARKDOWN_NOT_EXPLICITLY_NOT_ALLOW_MULTI_SUBMIT1 = """ +type: OctaveCodeQuestion +access_rules: + add_permissions: + - see_answer_after_submission +id: addition +value: 1 +timeout: 10 +prompt: | + + # Adding two numbers in Octave + +setup_code: | + pkg load statistics; + a = unifrnd(-10,10); + b = unifrnd(-10,10); + +names_for_user: [a, b] + +names_from_user: [c] + +test_code: | + if not isinstance(c, float): + feedback.finish(0, "Your computed c is not a float.") + + correct_c = a + b + rel_err = abs(correct_c-c)/abs(correct_c) + + if rel_err < 1e-7: + feedback.finish(1, "Your computed c was correct.") + else: + feedback.finish(0, "Your computed c was incorrect.") + +correct_code: | + + c = a + b +""" + +OCTAVE_CODE_MARKDOWN_NOT_EXPLICITLY_NOT_ALLOW_MULTI_SUBMIT2 = """ +type: OctaveCodeQuestion +access_rules: + remove_permissions: + - see_answer_after_submission +id: addition +value: 1 +timeout: 10 +prompt: | + + # Adding two numbers in Octave + +setup_code: | + pkg load statistics; + a = unifrnd(-10,10); + b = unifrnd(-10,10); + +names_for_user: [a, b] + +names_from_user: [c] + +test_code: | + if not isinstance(c, float): + feedback.finish(0, "Your computed c is not a float.") + + correct_c = a + b + rel_err = abs(correct_c-c)/abs(correct_c) + + if rel_err < 1e-7: + feedback.finish(1, "Your computed c was correct.") + else: + feedback.finish(0, "Your computed c was incorrect.") + +correct_code: | + + c = a + b +""" + +OCTAVE_CODE_MARKDOWN_PATTERN_EXPLICITLY_NOT_ALLOW_MULTI_SUBMIT = """ +type: OctaveCodeQuestion +id: addition +value: 1 +timeout: 10 +single_submission: True +prompt: | + + # Adding two numbers in Octave + +setup_code: | + pkg load statistics; + a = unifrnd(-10,10); + b = unifrnd(-10,10); + +names_for_user: [a, b] + +names_from_user: [c] + +test_code: | + if not isinstance(c, float): + feedback.finish(0, "Your computed c is not a float.") + + correct_c = a + b + rel_err = abs(correct_c-c)/abs(correct_c) + + if rel_err < 1e-7: + feedback.finish(1, "Your computed c was correct.") + else: + feedback.finish(0, "Your computed c was incorrect.") + +correct_code: | + c = a + b +""" + +OCTAVE_CODE_MARKDOWN_PATTERN_WITHOUT_TEST_CODE = 
""" +type: OctaveCodeQuestion +id: addition +value: 1 +timeout: 10 +single_submission: True +prompt: | + + # Adding two numbers in Octave + +setup_code: | + pkg load statistics; + a = unifrnd(-10,10); + b = unifrnd(-10,10); + +names_for_user: [a, b] + +names_from_user: [c] + +correct_code: | + c = a + b +""" + +OCTAVE_CODE_MARKDOWN_PATTERN_WITHOUT_CORRECT_CODE = """ +type: OctaveCodeQuestion +id: addition +value: 1 +timeout: 10 +single_submission: True +prompt: | + + # Adding two numbers in Octave + +setup_code: | + pkg load statistics; + a = unifrnd(-10,10); + b = unifrnd(-10,10); + +names_for_user: [a, b] + +names_from_user: [c] + +test_code: | + if not isinstance(c, float): + feedback.finish(0, "Your computed c is not a float.") + + correct_c = a + b + rel_err = abs(correct_c-c)/abs(correct_c) + + if rel_err < 1e-7: + feedback.finish(1, "Your computed c was correct.") + else: + feedback.finish(0, "Your computed c was incorrect.") + +""" + +OCTAVE_FEEDBACK_POINTS_CODE_MARKDOWN_PATTERN = """ +type: OctaveCodeQuestion +id: addition +value: 1 +timeout: 10 +prompt: | + + # Adding two numbers in Octave + +setup_code: | + pkg load statistics; + a = unifrnd(-10,10); + b = unifrnd(-10,10); + +names_for_user: [a, b] + +names_from_user: [c] + +test_code: | + if not isinstance(c, float): + feedback.finish(0, "Your computed c is not a float.") + + correct_c = a + b + rel_err = abs(correct_c-c)/abs(correct_c) + + if rel_err < 1e-7: + feedback.finish(%(full_points)s, "Your computed c was correct.") + else: + feedback.finish(%(min_points)s, "Your computed c was incorrect.") + +correct_code: | + + c = a + b +""" # noqa + +OCTAVE_FEEDBACK_POINTS_CODE_MARKDOWN_PATTERN = """ +type: OctaveCodeQuestion +id: addition +value: 1 +timeout: 10 +prompt: | + + # Adding two numbers in Octave + +setup_code: | + pkg load statistics; + a = unifrnd(-10,10); + b = unifrnd(-10,10); + +names_for_user: [a, b] + +names_from_user: [c] + +test_code: | + if not isinstance(c, float): + feedback.finish(0, "Your computed c is not a float.") + + correct_c = a + b + rel_err = abs(correct_c-c)/abs(correct_c) + + if rel_err < 1e-7: + feedback.finish(%(full_points)s, "Your computed c was correct.") + else: + feedback.finish(%(min_points)s, "Your computed c was incorrect.") + +correct_code: | + + c = a + b +""" # noqa + +# }}} + # vim: fdm=marker diff --git a/tests/test_pages/test_code.py b/tests/test_pages/test_code.py index 463b7117b..3b60794f4 100644 --- a/tests/test_pages/test_code.py +++ b/tests/test_pages/test_code.py @@ -215,10 +215,10 @@ class CodeQuestionTest(SingleCoursePageSandboxTestBaseMixin, SubprocessRunpyContainerMixin, LocmemBackendTestsMixin, TestCase): - def test_data_files_missing_random_question_data_file(self): + def test_python_data_files_missing_random_question_data_file(self): file_name = "foo" markdown = ( - markdowns.CODE_MARKDWON_PATTERN_WITH_DATAFILES + markdowns.PYTHON_CODE_MARKDOWN_PATTERN_WITH_DATAFILES % {"extra_data_file": "- %s" % file_name} ) resp = self.get_page_sandbox_preview_response(markdown) @@ -227,17 +227,17 @@ def test_data_files_missing_random_question_data_file(self): self.assertResponseContextContains( resp, PAGE_ERRORS, "data file '%s' not found" % file_name) - def test_data_files_missing_random_question_data_file_bad_format(self): - markdown = markdowns.CODE_MARKDWON_WITH_DATAFILES_BAD_FORMAT + def test_python_data_files_missing_random_question_data_file_bad_format(self): + markdown = markdowns.PYTHON_CODE_MARKDOWN_WITH_DATAFILES_BAD_FORMAT resp = 
self.get_page_sandbox_preview_response(markdown) self.assertEqual(resp.status_code, 200) self.assertSandboxNotHasValidPage(resp) self.assertResponseContextContains( resp, PAGE_ERRORS, "data file '%s' not found" % "['foo', 'bar']") - def test_not_multiple_submit_warning(self): + def test_python_not_multiple_submit_warning(self): markdown = ( - markdowns.CODE_MARKDWON_PATTERN_WITH_DATAFILES + markdowns.PYTHON_CODE_MARKDOWN_PATTERN_WITH_DATAFILES % {"extra_data_file": ""} ) resp = self.get_page_sandbox_preview_response(markdown) @@ -248,8 +248,8 @@ def test_not_multiple_submit_warning(self): NOT_ALLOW_MULTIPLE_SUBMISSION_WARNING ) - def test_not_multiple_submit_warning2(self): - markdown = markdowns.CODE_MARKDWON_NOT_EXPLICITLY_NOT_ALLOW_MULTI_SUBMIT1 + def test_python_not_multiple_submit_warning2(self): + markdown = markdowns.PYTHON_CODE_MARKDOWN_NOT_EXPLICITLY_NOT_ALLOW_MULTI_SUBMIT1 resp = self.get_page_sandbox_preview_response(markdown) self.assertEqual(resp.status_code, 200) self.assertSandboxHasValidPage(resp) @@ -258,8 +258,8 @@ def test_not_multiple_submit_warning2(self): NOT_ALLOW_MULTIPLE_SUBMISSION_WARNING ) - def test_not_multiple_submit_warning3(self): - markdown = markdowns.CODE_MARKDWON_NOT_EXPLICITLY_NOT_ALLOW_MULTI_SUBMIT2 + def test_python_not_multiple_submit_warning3(self): + markdown = markdowns.PYTHON_CODE_MARKDOWN_NOT_EXPLICITLY_NOT_ALLOW_MULTI_SUBMIT2 resp = self.get_page_sandbox_preview_response(markdown) self.assertEqual(resp.status_code, 200) self.assertSandboxHasValidPage(resp) @@ -268,16 +268,16 @@ def test_not_multiple_submit_warning3(self): NOT_ALLOW_MULTIPLE_SUBMISSION_WARNING ) - def test_allow_multiple_submit(self): - markdown = markdowns.CODE_MARKDWON + def test_python_allow_multiple_submit(self): + markdown = markdowns.PYTHON_CODE_MARKDOWN resp = self.get_page_sandbox_preview_response(markdown) self.assertEqual(resp.status_code, 200) self.assertSandboxHasValidPage(resp) self.assertSandboxWarningTextContain(resp, None) - def test_explicity_not_allow_multiple_submit(self): + def test_python_explicity_not_allow_multiple_submit(self): markdown = ( - markdowns.CODE_MARKDWON_PATTERN_EXPLICITLY_NOT_ALLOW_MULTI_SUBMIT + markdowns.PYTHON_CODE_MARKDOWN_PATTERN_EXPLICITLY_NOT_ALLOW_MULTI_SUBMIT % {"extra_data_file": ""} ) resp = self.get_page_sandbox_preview_response(markdown) @@ -285,8 +285,8 @@ def test_explicity_not_allow_multiple_submit(self): self.assertSandboxHasValidPage(resp) self.assertSandboxWarningTextContain(resp, None) - def test_question_without_test_code(self): - markdown = markdowns.CODE_MARKDWON_PATTERN_WITHOUT_TEST_CODE + def test_python_question_without_test_code(self): + markdown = markdowns.PYTHON_CODE_MARKDOWN_PATTERN_WITHOUT_TEST_CODE resp = self.get_page_sandbox_preview_response(markdown) self.assertEqual(resp.status_code, 200) self.assertSandboxHasValidPage(resp) @@ -300,8 +300,8 @@ def test_question_without_test_code(self): self.assertResponseContextAnswerFeedbackContainsFeedback( resp, NO_CORRECTNESS_INFO_MSG) - def test_question_without_correct_code(self): - markdown = markdowns.CODE_MARKDWON_PATTERN_WITHOUT_CORRECT_CODE + def test_python_question_without_correct_code(self): + markdown = markdowns.PYTHON_CODE_MARKDOWN_PATTERN_WITHOUT_CORRECT_CODE resp = self.get_page_sandbox_preview_response(markdown) self.assertEqual(resp.status_code, 200) self.assertSandboxHasValidPage(resp) @@ -313,8 +313,8 @@ def test_question_without_correct_code(self): self.assertEqual(resp.status_code, 200) 
self.assertResponseContextAnswerFeedbackCorrectnessEquals(resp, 1) - def test_question_with_human_feedback_both_feedback_value_feedback_percentage_present(self): # noqa - markdown = (markdowns.CODE_WITH_HUMAN_FEEDBACK_MARKDWON_PATTERN + def test_python_question_with_human_feedback_both_feedback_value_feedback_percentage_present(self): # noqa + markdown = (markdowns.PYTHON_CODE_WITH_HUMAN_FEEDBACK_MARKDOWN_PATTERN % {"value": 3, "human_feedback": "human_feedback_value: 2", "extra_attribute": "human_feedback_percentage: 20"}) @@ -326,8 +326,8 @@ def test_question_with_human_feedback_both_feedback_value_feedback_percentage_pr "'human_feedback_percentage' are not " "allowed to coexist") - def test_question_with_human_feedback_neither_feedback_value_feedback_percentage_present(self): # noqa - markdown = (markdowns.CODE_WITH_HUMAN_FEEDBACK_MARKDWON_PATTERN + def test_python_question_with_human_feedback_neither_feedback_value_feedback_percentage_present(self): # noqa + markdown = (markdowns.PYTHON_CODE_WITH_HUMAN_FEEDBACK_MARKDOWN_PATTERN % {"value": 3, "human_feedback": "", "extra_attribute": ""}) @@ -338,8 +338,8 @@ def test_question_with_human_feedback_neither_feedback_value_feedback_percentage resp, PAGE_ERRORS, "expecting either 'human_feedback_value' " "or 'human_feedback_percentage', found neither.") - def test_question_with_human_feedback_used_feedback_value_warning(self): - markdown = (markdowns.CODE_WITH_HUMAN_FEEDBACK_MARKDWON_PATTERN + def test_python_question_with_human_feedback_used_feedback_value_warning(self): + markdown = (markdowns.PYTHON_CODE_WITH_HUMAN_FEEDBACK_MARKDOWN_PATTERN % {"value": 3, "human_feedback": "human_feedback_value: 2", "extra_attribute": ""}) @@ -352,8 +352,8 @@ def test_question_with_human_feedback_used_feedback_value_warning(self): "use 'human_feedback_percentage' instead." 
) - def test_question_with_human_feedback_used_feedback_value_bad_value(self): - markdown = (markdowns.CODE_WITH_HUMAN_FEEDBACK_MARKDWON_PATTERN + def test_python_question_with_human_feedback_used_feedback_value_bad_value(self): + markdown = (markdowns.PYTHON_CODE_WITH_HUMAN_FEEDBACK_MARKDOWN_PATTERN % {"value": 0, "human_feedback": "human_feedback_value: 2", "extra_attribute": ""}) @@ -365,8 +365,8 @@ def test_question_with_human_feedback_used_feedback_value_bad_value(self): "if value of question is 0, use " "'human_feedback_percentage' instead") - def test_question_with_human_feedback_used_feedback_value_invalid(self): - markdown = (markdowns.CODE_WITH_HUMAN_FEEDBACK_MARKDWON_PATTERN + def test_python_question_with_human_feedback_used_feedback_value_invalid(self): + markdown = (markdowns.PYTHON_CODE_WITH_HUMAN_FEEDBACK_MARKDOWN_PATTERN % {"value": 2, "human_feedback": "human_feedback_value: 3", "extra_attribute": ""}) @@ -377,8 +377,8 @@ def test_question_with_human_feedback_used_feedback_value_invalid(self): resp, PAGE_ERRORS, "human_feedback_value greater than overall " "value of question") - def test_question_with_human_feedback_feedback_percentage_invalid(self): - markdown = (markdowns.CODE_WITH_HUMAN_FEEDBACK_MARKDWON_PATTERN + def test_python_question_with_human_feedback_feedback_percentage_invalid(self): + markdown = (markdowns.PYTHON_CODE_WITH_HUMAN_FEEDBACK_MARKDOWN_PATTERN % {"value": 2, "human_feedback": "human_feedback_percentage: 120", "extra_attribute": ""}) @@ -389,8 +389,8 @@ def test_question_with_human_feedback_feedback_percentage_invalid(self): resp, PAGE_ERRORS, "the value of human_feedback_percentage " "must be between 0 and 100") - def test_question_with_human_feedback_value_0_feedback_full_percentage(self): - markdown = (markdowns.CODE_WITH_HUMAN_FEEDBACK_MARKDWON_PATTERN + def test_python_question_with_human_feedback_value_0_feedback_full_percentage(self): + markdown = (markdowns.PYTHON_CODE_WITH_HUMAN_FEEDBACK_MARKDOWN_PATTERN % {"value": 0, "human_feedback": "human_feedback_percentage: 100", "extra_attribute": ""}) @@ -399,8 +399,8 @@ def test_question_with_human_feedback_value_0_feedback_full_percentage(self): self.assertSandboxHasValidPage(resp) self.assertSandboxWarningTextContain(resp, None) - def test_question_with_human_feedback_value_0_feedback_0_percentage(self): - markdown = (markdowns.CODE_WITH_HUMAN_FEEDBACK_MARKDWON_PATTERN + def test_python_question_with_human_feedback_value_0_feedback_0_percentage(self): + markdown = (markdowns.PYTHON_CODE_WITH_HUMAN_FEEDBACK_MARKDOWN_PATTERN % {"value": 0, "human_feedback": "human_feedback_percentage: 0", "extra_attribute": ""}) @@ -409,11 +409,11 @@ def test_question_with_human_feedback_value_0_feedback_0_percentage(self): self.assertSandboxHasValidPage(resp) self.assertSandboxWarningTextContain(resp, None) - def test_request_run_with_retries_raise_uncaught_error_in_sandbox(self): + def test_python_request_run_with_retries_raise_uncaught_error_in_sandbox(self): with mock.patch( RUNCODE_WITH_RETRIES_PATH, autospec=True - ) as mock_runpy: + ) as mock_runcode: expected_error_str = ("This is an error raised with " "request_run_with_retries") @@ -422,10 +422,10 @@ def test_request_run_with_retries_raise_uncaught_error_in_sandbox(self): '

<p>This is the <a href="http://example.com/1">explanation</a>'
                '.</p> The following code is a valid answer: '
                '<pre>\nc = 2 + 1\n</pre>
') - mock_runpy.side_effect = RuntimeError(expected_error_str) + mock_runcode.side_effect = RuntimeError(expected_error_str) resp = self.get_page_sandbox_submit_answer_response( - markdowns.CODE_MARKDWON, + markdowns.PYTHON_CODE_MARKDOWN, answer_data={"answer": ['c = 1 + 2\r']}) self.assertEqual(resp.status_code, 200) self.assertResponseContextAnswerFeedbackCorrectnessEquals(resp, @@ -436,18 +436,18 @@ def test_request_run_with_retries_raise_uncaught_error_in_sandbox(self): # No email when in sandbox self.assertEqual(len(mail.outbox), 0) - def test_request_run_with_retries_raise_uncaught_error_debugging(self): + def test_python_request_run_with_retries_raise_uncaught_error_debugging(self): with mock.patch( RUNCODE_WITH_RETRIES_PATH, autospec=True - ) as mock_runpy: + ) as mock_runcode: expected_error_str = ("This is an error raised with " "request_run_with_retries") - mock_runpy.side_effect = RuntimeError(expected_error_str) + mock_runcode.side_effect = RuntimeError(expected_error_str) with override_settings(DEBUG=True): resp = self.get_page_sandbox_submit_answer_response( - markdowns.CODE_MARKDWON, + markdowns.PYTHON_CODE_MARKDOWN, answer_data={"answer": ['c = 1 + 2\r']}) self.assertEqual(resp.status_code, 200) self.assertResponseContextAnswerFeedbackCorrectnessEquals(resp, @@ -455,14 +455,14 @@ def test_request_run_with_retries_raise_uncaught_error_debugging(self): # No email when debugging self.assertEqual(len(mail.outbox), 0) - def test_request_run_with_retries_raise_uncaught_error(self): + def test_python_request_run_with_retries_raise_uncaught_error(self): with mock.patch( RUNCODE_WITH_RETRIES_PATH, autospec=True - ) as mock_runpy: + ) as mock_runcode: expected_error_str = ("This is an error raised with " "request_run_with_retries") - mock_runpy.side_effect = RuntimeError(expected_error_str) + mock_runcode.side_effect = RuntimeError(expected_error_str) with mock.patch("course.page.PageContext") as mock_page_context: mock_page_context.return_value.in_sandbox = False @@ -474,7 +474,178 @@ def test_request_run_with_retries_raise_uncaught_error(self): mock_page_context.return_value.commit_sha = b"1234" resp = self.get_page_sandbox_submit_answer_response( - markdowns.CODE_MARKDWON, + markdowns.PYTHON_CODE_MARKDOWN, + answer_data={"answer": ['c = 1 + 2\r']}) + self.assertEqual(resp.status_code, 200) + self.assertResponseContextAnswerFeedbackCorrectnessEquals(resp, + None) + self.assertEqual(len(mail.outbox), 1) + self.assertIn(expected_error_str, mail.outbox[0].body) + + def test_octave_data_files_missing_random_question_data_file(self): + file_name = "foo" + markdown = ( + markdowns.OCTAVE_CODE_MARKDOWN_PATTERN_WITH_DATAFILES + % {"extra_data_file": "- %s" % file_name} + ) + resp = self.get_page_sandbox_preview_response(markdown) + self.assertEqual(resp.status_code, 200) + self.assertSandboxNotHasValidPage(resp) + self.assertResponseContextContains( + resp, PAGE_ERRORS, "data file '%s' not found" % file_name) + + def test_octave_data_files_missing_random_question_data_file_bad_format(self): + markdown = markdowns.OCTAVE_CODE_MARKDOWN_WITH_DATAFILES_BAD_FORMAT + resp = self.get_page_sandbox_preview_response(markdown) + self.assertEqual(resp.status_code, 200) + self.assertSandboxNotHasValidPage(resp) + self.assertResponseContextContains( + resp, PAGE_ERRORS, "data file '%s' not found" % "['foo', 'bar']") + + def test_octave_not_multiple_submit_warning(self): + markdown = ( + markdowns.OCTAVE_CODE_MARKDOWN_PATTERN_WITH_DATAFILES + % {"extra_data_file": ""} + ) + resp = 
self.get_page_sandbox_preview_response(markdown) + self.assertEqual(resp.status_code, 200) + self.assertSandboxHasValidPage(resp) + self.assertSandboxWarningTextContain( + resp, + NOT_ALLOW_MULTIPLE_SUBMISSION_WARNING + ) + + def test_octave_not_multiple_submit_warning2(self): + markdown = markdowns.OCTAVE_CODE_MARKDOWN_NOT_EXPLICITLY_NOT_ALLOW_MULTI_SUBMIT1 + resp = self.get_page_sandbox_preview_response(markdown) + self.assertEqual(resp.status_code, 200) + self.assertSandboxHasValidPage(resp) + self.assertSandboxWarningTextContain( + resp, + NOT_ALLOW_MULTIPLE_SUBMISSION_WARNING + ) + + def test_octave_not_multiple_submit_warning3(self): + markdown = markdowns.OCTAVE_CODE_MARKDOWN_NOT_EXPLICITLY_NOT_ALLOW_MULTI_SUBMIT2 + resp = self.get_page_sandbox_preview_response(markdown) + self.assertEqual(resp.status_code, 200) + self.assertSandboxHasValidPage(resp) + self.assertSandboxWarningTextContain( + resp, + NOT_ALLOW_MULTIPLE_SUBMISSION_WARNING + ) + + def test_octave_allow_multiple_submit(self): + markdown = markdowns.OCTAVE_CODE_MARKDOWN + resp = self.get_page_sandbox_preview_response(markdown) + self.assertEqual(resp.status_code, 200) + self.assertSandboxHasValidPage(resp) + self.assertSandboxWarningTextContain(resp, None) + + def test_octave_explicity_not_allow_multiple_submit(self): + markdown = ( + markdowns.OCTAVE_CODE_MARKDOWN_PATTERN_EXPLICITLY_NOT_ALLOW_MULTI_SUBMIT + % {"extra_data_file": ""} + ) + resp = self.get_page_sandbox_preview_response(markdown) + self.assertEqual(resp.status_code, 200) + self.assertSandboxHasValidPage(resp) + self.assertSandboxWarningTextContain(resp, None) + + def test_octave_question_without_test_code(self): + markdown = markdowns.OCTAVE_CODE_MARKDOWN_PATTERN_WITHOUT_TEST_CODE + resp = self.get_page_sandbox_preview_response(markdown) + self.assertEqual(resp.status_code, 200) + self.assertSandboxHasValidPage(resp) + self.assertSandboxWarningTextContain(resp, None) + + resp = self.get_page_sandbox_submit_answer_response( + markdown, + answer_data={"answer": ['c = b + a\r']}) + self.assertEqual(resp.status_code, 200) + self.assertResponseContextAnswerFeedbackCorrectnessEquals(resp, None) + self.assertResponseContextAnswerFeedbackContainsFeedback( + resp, NO_CORRECTNESS_INFO_MSG) + + def test_octave_question_without_correct_code(self): + markdown = markdowns.OCTAVE_CODE_MARKDOWN_PATTERN_WITHOUT_CORRECT_CODE + resp = self.get_page_sandbox_preview_response(markdown) + self.assertEqual(resp.status_code, 200) + self.assertSandboxHasValidPage(resp) + self.assertSandboxWarningTextContain(resp, None) + + resp = self.get_page_sandbox_submit_answer_response( + markdown, + answer_data={"answer": ['c = b + a\r']}) + self.assertEqual(resp.status_code, 200) + self.assertResponseContextAnswerFeedbackCorrectnessEquals(resp, 1) + + def test_octave_request_run_with_retries_raise_uncaught_error_in_sandbox(self): + with mock.patch( + RUNCODE_WITH_RETRIES_PATH, + autospec=True + ) as mock_runcode: + expected_error_str = ("This is an error raised with " + "request_run_with_retries") + + # correct_code_explanation and correct_code + expected_feedback = ( + '

<p>This is the <a href="http://example.com/1">explanation</a>'
+                '.</p> The following code is a valid answer: '
+                '<pre>\nc = 2 + 1\n</pre>
') + mock_runcode.side_effect = RuntimeError(expected_error_str) + + resp = self.get_page_sandbox_submit_answer_response( + markdowns.OCTAVE_CODE_MARKDOWN, + answer_data={"answer": ['c = 1 + 2\r']}) + self.assertEqual(resp.status_code, 200) + self.assertResponseContextAnswerFeedbackCorrectnessEquals(resp, + None) + + self.assertResponseContextContains(resp, "correct_answer", + expected_feedback) + # No email when in sandbox + self.assertEqual(len(mail.outbox), 0) + + def test_octave_request_run_with_retries_raise_uncaught_error_debugging(self): + with mock.patch( + RUNCODE_WITH_RETRIES_PATH, + autospec=True + ) as mock_runcode: + expected_error_str = ("This is an error raised with " + "request_run_with_retries") + mock_runcode.side_effect = RuntimeError(expected_error_str) + + with override_settings(DEBUG=True): + resp = self.get_page_sandbox_submit_answer_response( + markdowns.OCTAVE_CODE_MARKDOWN, + answer_data={"answer": ['c = 1 + 2\r']}) + self.assertEqual(resp.status_code, 200) + self.assertResponseContextAnswerFeedbackCorrectnessEquals(resp, + None) + # No email when debugging + self.assertEqual(len(mail.outbox), 0) + + def test_octave_request_run_with_retries_raise_uncaught_error(self): + with mock.patch( + RUNCODE_WITH_RETRIES_PATH, + autospec=True + ) as mock_runcode: + expected_error_str = ("This is an error raised with " + "request_run_with_retries") + mock_runcode.side_effect = RuntimeError(expected_error_str) + + with mock.patch("course.page.PageContext") as mock_page_context: + mock_page_context.return_value.in_sandbox = False + mock_page_context.return_value.course = self.course + + # This remove the warning caused by mocked commit_sha value + # "CacheKeyWarning: Cache key contains characters that + # will cause errors ..." + mock_page_context.return_value.commit_sha = b"1234" + + resp = self.get_page_sandbox_submit_answer_response( + markdowns.OCTAVE_CODE_MARKDOWN, answer_data={"answer": ['c = 1 + 2\r']}) self.assertEqual(resp.status_code, 200) self.assertResponseContextAnswerFeedbackCorrectnessEquals(resp, @@ -486,10 +657,10 @@ def test_send_email_failure_when_request_run_with_retries_raise_uncaught_error(s with mock.patch( RUNCODE_WITH_RETRIES_PATH, autospec=True - ) as mock_runpy: + ) as mock_runcode: expected_error_str = ("This is an error raised with " "request_run_with_retries") - mock_runpy.side_effect = RuntimeError(expected_error_str) + mock_runcode.side_effect = RuntimeError(expected_error_str) with mock.patch("course.page.PageContext") as mock_page_context: mock_page_context.return_value.in_sandbox = False @@ -504,7 +675,7 @@ def test_send_email_failure_when_request_run_with_retries_raise_uncaught_error(s mock_send.side_effect = RuntimeError("some email send error") resp = self.get_page_sandbox_submit_answer_response( - markdowns.CODE_MARKDWON, + markdowns.PYTHON_CODE_MARKDOWN, answer_data={"answer": ['c = 1 + 2\r']}) self.assertContains(resp, expected_error_str) self.assertEqual(resp.status_code, 200) @@ -516,13 +687,47 @@ def assert_runpy_result_and_response(self, result_type, expected_msgs=None, not_expected_msgs=None, correctness=0, mail_count=0, in_html=False, **extra_result): - with mock.patch(RUNCODE_WITH_RETRIES_PATH, autospec=True) as mock_runpy: + with mock.patch(RUNCODE_WITH_RETRIES_PATH, autospec=True) as mock_runcode: + result = {"result": result_type} + result.update(extra_result) + mock_runcode.return_value = result + + resp = self.get_page_sandbox_submit_answer_response( + markdowns.PYTHON_CODE_MARKDOWN, + answer_data={"answer": ['c = 1 + 2\r']}) 
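+            # a bare string in expected_msgs/not_expected_msgs is promoted
+            # to a one-element list before the response context is checked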
+
+            if expected_msgs is not None:
+                if isinstance(expected_msgs, six.text_type):
+                    expected_msgs = [expected_msgs]
+                for msg in expected_msgs:
+                    self.assertResponseContextAnswerFeedbackContainsFeedback(
+                        resp, msg, html=in_html)
+
+            if not_expected_msgs is not None:
+                if isinstance(not_expected_msgs, six.text_type):
+                    not_expected_msgs = [not_expected_msgs]
+                for msg in not_expected_msgs:
+                    self.assertResponseContextAnswerFeedbackNotContainsFeedback(
+                        resp, msg)
+                    self.assertResponseContextAnswerFeedbackNotContainsFeedback(
+                        resp, msg, html=True)
+
+            self.assertEqual(resp.status_code, 200)
+            self.assertResponseContextAnswerFeedbackCorrectnessEquals(resp,
+                                                                      correctness)
+            self.assertEqual(len(mail.outbox), mail_count)
+
+    def assert_runoctave_result_and_response(self, result_type, expected_msgs=None,
+                                             not_expected_msgs=None,
+                                             correctness=0, mail_count=0,
+                                             in_html=False, **extra_result):
+        with mock.patch(RUNCODE_WITH_RETRIES_PATH, autospec=True) as mock_runcode:
             result = {"result": result_type}
             result.update(extra_result)
-            mock_runpy.return_value = result
+            mock_runcode.return_value = result

             resp = self.get_page_sandbox_submit_answer_response(
-                markdowns.CODE_MARKDWON,
+                markdowns.OCTAVE_CODE_MARKDOWN,
                 answer_data={"answer": ['c = 1 + 2\r']})

             if expected_msgs is not None:
@@ -799,7 +1004,7 @@ def test_html_non_text_bleached_in_feedback(self):

     # {{{ https://github.com/inducer/relate/pull/448
     def test_feedback_points_close_to_1(self):
-        markdown = (markdowns.FEEDBACK_POINTS_CODE_MARKDWON_PATTERN
+        markdown = (markdowns.PYTHON_FEEDBACK_POINTS_CODE_MARKDOWN_PATTERN
                     % {
                         "full_points": 1.000000000002,
                         "min_points": 0
@@ -816,7 +1021,7 @@ def test_feedback_points_close_to_1(self):

     def test_feedback_code_exceed_1(self):
         feedback_points = 1.1
-        markdown = (markdowns.FEEDBACK_POINTS_CODE_MARKDWON_PATTERN
+        markdown = (markdowns.PYTHON_FEEDBACK_POINTS_CODE_MARKDOWN_PATTERN
                     % {
                         "full_points": feedback_points,
                         "min_points": 0
@@ -838,7 +1043,7 @@ def test_feedback_code_exceed_1(self):

     def test_feedback_code_positive_close_to_0(self):
         # https://github.com/inducer/relate/pull/448#issuecomment-363655132
-        markdown = (markdowns.FEEDBACK_POINTS_CODE_MARKDWON_PATTERN
+        markdown = (markdowns.PYTHON_FEEDBACK_POINTS_CODE_MARKDOWN_PATTERN
                     % {
                         "full_points": 1,
                         "min_points": 0.00000000001
@@ -856,7 +1061,7 @@ def test_feedback_code_positive_close_to_0(self):

     def test_feedback_code_negative_close_to_0(self):
         # https://github.com/inducer/relate/pull/448#issuecomment-363655132
-        markdown = (markdowns.FEEDBACK_POINTS_CODE_MARKDWON_PATTERN
+        markdown = (markdowns.PYTHON_FEEDBACK_POINTS_CODE_MARKDOWN_PATTERN
                     % {
                         "full_points": 1,
                         "min_points": -0.00000000001
@@ -874,7 +1079,7 @@ def test_feedback_code_negative_close_to_0(self):

     def test_feedback_code_error_close_below_max_auto_feedback_points(self):
         feedback_points = MAX_EXTRA_CREDIT_FACTOR - 1e-6
-        markdown = (markdowns.FEEDBACK_POINTS_CODE_MARKDWON_PATTERN
+        markdown = (markdowns.PYTHON_FEEDBACK_POINTS_CODE_MARKDOWN_PATTERN
                     % {
                         "full_points": feedback_points,
                         "min_points": 0
@@ -892,7 +1097,7 @@ def test_feedback_code_error_close_below_max_auto_feedback_points(self):

     def test_feedback_code_error_close_above_max_auto_feedback_points(self):
         feedback_points = MAX_EXTRA_CREDIT_FACTOR + 1e-6
-        markdown = (markdowns.FEEDBACK_POINTS_CODE_MARKDWON_PATTERN
+        markdown = (markdowns.PYTHON_FEEDBACK_POINTS_CODE_MARKDOWN_PATTERN
                     % {
                         "full_points": feedback_points,
                         "min_points": 0
@@ -910,7 +1115,7 @@ def test_feedback_code_error_close_above_max_auto_feedback_points(self):

     def test_feedback_code_error_negative_feedback_points(self):
         invalid_feedback_points = -0.1
-        markdown = (markdowns.FEEDBACK_POINTS_CODE_MARKDWON_PATTERN
+        markdown = (markdowns.PYTHON_FEEDBACK_POINTS_CODE_MARKDOWN_PATTERN
                     % {
                         "full_points": 1,
                         "min_points": invalid_feedback_points
@@ -937,7 +1142,7 @@ def test_feedback_code_error_negative_feedback_points(self):

     def test_feedback_code_error_exceed_max_extra_credit_factor(self):
         invalid_feedback_points = 10.1
-        markdown = (markdowns.FEEDBACK_POINTS_CODE_MARKDWON_PATTERN
+        markdown = (markdowns.PYTHON_FEEDBACK_POINTS_CODE_MARKDOWN_PATTERN
                     % {
                         "full_points": invalid_feedback_points,
                         "min_points": 0
@@ -962,7 +1167,7 @@ def test_feedback_code_error_exceed_max_extra_credit_factor(self):

     def test_feedback_code_error_exceed_max_extra_credit_factor_email(self):
         invalid_feedback_points = 10.1
-        markdown = (markdowns.FEEDBACK_POINTS_CODE_MARKDWON_PATTERN
+        markdown = (markdowns.PYTHON_FEEDBACK_POINTS_CODE_MARKDOWN_PATTERN
                     % {
                         "full_points": invalid_feedback_points,
                         "min_points": 0
diff --git a/tests/test_pages/utils.py b/tests/test_pages/utils.py
index f6ba98428..d021796e2 100644
--- a/tests/test_pages/utils.py
+++ b/tests/test_pages/utils.py
@@ -74,9 +74,10 @@ def _skip_real_docker_test():
 REAL_RELATE_DOCKER_URL = "unix:///var/run/docker.sock"
 REAL_RELATE_DOCKER_TLS_CONFIG = None
 REAL_RELATE_DOCKER_RUNPY_IMAGE = "inducer/relate-runcode-python"
+REAL_RELATE_DOCKER_RUNOCTAVE_IMAGE = "davis/relate-runcode-octav"


-class RealDockerTestMixin(object):
+class RealDockerTestMixinPython(object):
     """
     This is used for code question test with real docker container.
     Note: the test speed is slow when using this mixin.
@@ -88,18 +89,18 @@ def setUpClass(cls): # noqa
         if skip_real_docker_test:
             raise SkipTest(SKIP_REAL_DOCKER_REASON)

-        super(RealDockerTestMixin, cls).setUpClass()
+        super(RealDockerTestMixinPython, cls).setUpClass()
         cls.override_docker_settings = override_settings(
             RELATE_DOCKER_URL=REAL_RELATE_DOCKER_URL,
             RELATE_DOCKER_RUNPY_IMAGE=REAL_RELATE_DOCKER_RUNPY_IMAGE,
             RELATE_DOCKER_TLS_CONFIG=REAL_RELATE_DOCKER_TLS_CONFIG
         )
         cls.override_docker_settings.enable()
         cls.make_sure_docker_image_pulled()

     @classmethod
     def tearDownClass(cls): # noqa
-        super(RealDockerTestMixin, cls).tearDownClass()
+        super(RealDockerTestMixinPython, cls).tearDownClass()
         cls.override_docker_settings.disable()

     @classmethod
@@ -114,3 +115,43 @@ def make_sure_docker_image_pulled(cls):
         if not bool(cli.images(REAL_RELATE_DOCKER_RUNPY_IMAGE)):
             # This should run only once and get cached on Travis-CI
             cli.pull(REAL_RELATE_DOCKER_RUNPY_IMAGE)
+
+
+class RealDockerTestMixinOctave(object):
+    """
+    This is used for code question tests with a real Docker container.
+    Note: the tests are slow when using this mixin.
+ """ + + @classmethod + def setUpClass(cls): # noqa + from unittest import SkipTest + if skip_real_docker_test: + raise SkipTest(SKIP_REAL_DOCKER_REASON) + + super(RealDockerTestMixinOctave, cls).setUpClass() + cls.override_docker_settings = override_settings( + RELATE_DOCKER_URL=REAL_RELATE_DOCKER_URL, + RELATE_DOCKER_RUNPY_IMAGE=REAL_RELATE_DOCKER_RUNOCTAVE_IMAGE, + RELATE_DOCKER_TLS_CONFIG=REAL_RELATE_DOCKER_TLS_CONFIG + )settings.RELATE_DOCKER_RUNOCTAVE_IMAGE + cls.override_docker_settings.enable() + cls.make_sure_docker_image_pulled() + + @classmethod + def tearDownClass(cls): # noqa + super(RealDockerTestMixinOctave, cls).tearDownClass() + cls.override_docker_settings.disable() + + @classmethod + def make_sure_docker_image_pulled(cls): + import docker + cli = docker.Client( + base_url=REAL_RELATE_DOCKER_URL, + tls=None, + timeout=15, + version="1.19") + + if not bool(cli.images(REAL_RELATE_DOCKER_RUNOCTAVE_IMAGE)): + # This should run only once and get cached on Travis-CI + cli.pull(REAL_RELATE_DOCKER_RUNOCTAVE_IMAGE) diff --git a/tests/test_utils.py b/tests/test_utils.py index 5931627ef..996fada72 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -51,6 +51,7 @@ from tests.base_test_mixins import ( CoursesTestMixinBase, SingleCoursePageTestMixin, SubprocessRunpyContainerMixin, + SubprocessRunOctaveContainerMixin, SingleCourseTestMixin, MockAddMessageMixing, ) from tests.utils import mock