code
string | signature
string | docstring
string | loss_without_docstring
float64 | loss_with_docstring
float64 | factor
float64 |
---|---|---|---|---|---|
def parse(self, line):
    """Parse a line of the Nginx error log into message/params/site."""
    fields = line.split(",")
    # First field holds "date time message"; remaining fields are key: value.
    date_time_message = fields.pop(0).split(" ", 2)
    details = dict()
    for field in fields:
        pair = field.split(":", 1)
        name = pair[0].strip()
        detail = pair[1].strip() if len(pair) > 1 else ""
        # Missing or empty values are normalized to "-".
        details[name] = detail or "-"
    self.message = ('%s\n'
                    'Date: %s\n'
                    'Time: %s\n'
                    'Request: %s\n'
                    'Referrer: %s\n'
                    'Server: %s\n'
                    'Client: %s\n'
                    'Host: %s\n'
                    'Upstream: %s\n')
    self.params = [
        date_time_message[2],
        date_time_message[0],
        date_time_message[1],
        details.get("request", "-"),
        details.get("referrer", "-"),
        details.get("server", "-"),
        details.get("client", "-"),
        details.get("host", "-"),
        details.get("upstream", "-"),
    ]
    self.site = details.get("referrer", "-")
def load(self, code, setup='', teardown=''):
    """Prepare setup, test, and teardown code to be run in the console.

    code     -- list; processed lines of code: strings (input) or
                CodeAnswer objects (output)
    setup    -- str; raw setup code
    teardown -- str; raw teardown code
    """
    super().load(code, setup, teardown)
    # Each load starts from a pristine copy of the original frame.
    self._frame = self._original_frame.copy()
def prompt_for_existing_session(self, sessions):
    """Ask the user to resume one of SESSIONS or create a new session.

    sessions -- list of dicts with 'creator', 'created' and 'id' keys.

    Returns the chosen session dict, or None to create a new session.
    """
    if not sessions:
        return None
    print("Would you like to join a previous session or create a new session?")
    for index, session in enumerate(sessions):
        print(("{id} : {creator} started at {timestamp} ({hashid})"
               .format(id=index + 1, creator=session.get('creator'),
                       timestamp=session.get('created'), hashid=session.get('id'))))
    print("{new_id} : Create a new session with the current files?"
          .format(new_id=len(sessions) + 1))
    desired = input("Type the number of the session you'd like to join: ")
    try:
        outcome = int(desired.strip())
    except ValueError:
        # BUG FIX: was a bare `except`. Unparseable input defaults to a
        # new session.
        outcome = len(sessions) + 1
        log.warning("Could not parse int for choice")
    if outcome < 1 or outcome > len(sessions):
        # BUG FIX: was `outcome >= len(sessions)`, which made the last
        # listed session impossible to resume.
        log.info("Chose to start new session")
        return None
    log.info("Resuming session {}".format(outcome - 1))
    # BUG FIX: was `return session` (the loop variable), which always
    # returned the *last* printed session instead of the chosen one.
    return sessions[outcome - 1]
def send_messages(self, data, timeout=30, endpoint='/collab/start/'):
    """Send messages to server, along with user authentication.

    Returns the decoded JSON response, or None on any failure.
    """
    address = 'https://{}{}'.format(self.COLLAB_SERVER, endpoint)
    params = {
        'client_name': 'ok-client',
        'client_version': client.__version__,
    }
    log.info('Sending messages to %s', address)
    try:
        r = requests.post(address, params=params, json=data, timeout=timeout)
        r.raise_for_status()
        return r.json()
    except Exception as ex:
        # The original tuple ended in Exception, which already subsumes
        # the request-specific exception classes.
        log.warning('{}: {}'.format(ex.__class__.__name__, str(ex)))
        # BUG FIX: the two sentences were concatenated without a space.
        print("There was an error connecting to the server. "
              "Run with --debug for more details")
        return
def send_messages(self, messages, timeout):
    """Send messages to server, along with user authentication.

    Returns the submission URL reported by the server, or None on error.
    """
    payload = {
        'assignment': self.assignment.endpoint,
        'messages': messages,
        'submit': self.args.submit
    }
    address = self.API_ENDPOINT
    query = {
        'client_name': 'ok-client',
        'client_version': client.__version__,
    }
    log.info('Sending messages to %s', address)
    try:
        response = requests.post(address, params=query, json=payload,
                                 timeout=timeout)
        response.raise_for_status()
        return response.json()['url']
    except (requests.exceptions.RequestException,
            requests.exceptions.BaseHTTPError, ValueError) as ex:
        log.warning('%s: %s', ex.__class__.__name__, str(ex))
        return
def validate_json(self):
    """Return True iff the stored guidance checksum matches its contents."""
    if not hasattr(self, 'guidance_json'):
        return False
    expected = self.guidance_json.get('checksum')
    contents = self.guidance_json.get('db')
    # The checksum covers the canonical JSON dump plus the endpoint name.
    hash_key = ("{}{}".format(json.dumps(contents, sort_keys=True),
                              self.assignment.endpoint).encode())
    digest = hashlib.md5(hash_key).hexdigest()
    if not expected:
        log.warning("Checksum on guidance not found. Invalidating file")
        return False
    if digest != expected:
        log.warning("Checksum %s did not match actual digest %s", expected, digest)
        return False
    return True
def set_tg(self):
    """Set self.tg_id from the locally cached treatment group, fetching
    it from the treatment server (and caching it) if not yet cached.
    """
    tg_path = self.current_working_dir + LOCAL_TG_FILE
    # Check whether the student already has a treatment group number.
    if not os.path.isfile(tg_path):
        cur_email = self.assignment.get_student_email()
        log.info("Current email is %s", cur_email)
        if not cur_email:
            self.tg_id = -1
            return EMPTY_MISUCOUNT_TGID_PRNTEDMSG
        tg_url = ("{}{}/{}{}"
                  .format(TGSERVER, cur_email, self.assignment_name,
                          TG_SERVER_ENDING))
        try:
            log.info("Accessing treatment server at %s", tg_url)
            data = requests.get(tg_url, timeout=1).json()
        except IOError:
            data = {"tg": -1}
            log.warning("Failed to communicate to server", exc_info=True)
        if data.get("tg") is None:
            log.warning("Server returned back a bad treatment group ID.")
            data = {"tg": -1}
        with open(tg_path, "w") as fd:
            fd.write(str(data["tg"]))
    # BUG FIX: the read handle was opened with a bare open() and never
    # closed; use a context manager.
    with open(tg_path, 'r') as tg_file:
        self.tg_id = int(tg_file.read())
def prompt_with_prob(self, orig_response=None, prob=None):
    """Ask for rationale with a specific level of probability.

    orig_response -- list of str or None; the student's original answer.
    prob -- float or None; probability of prompting. When None, defaults
            to self.prompt_probability and the probability is zeroed
            after one prompt.
    """
    # Opt-out via --no-experiments is deliberately disabled.
    if self.load_error:
        return 'Failed to read guidance config file'
    if hasattr(self.assignment, 'is_test'):
        log.info("Skipping prompt due to test mode")
        return "Test response"
    use_default_prob = prob is None
    if use_default_prob:
        prob = self.prompt_probability
    if random.random() > prob:
        log.info("Did not prompt for rationale: Insufficient Probability")
        return "Did not prompt for rationale"
    with format.block(style="-"):
        rationale = prompt.explanation_msg(EXPLANTION_PROMPT,
                                           short_msg=CONFIRM_BLANK_EXPLANATION)
    if use_default_prob:
        # BUG FIX: this was `if prob is None`, which could never be true
        # after prob was defaulted above, so the future prompt
        # likelihood was never reduced.
        self.prompt_probability = 0
    if orig_response:
        print('Thanks! Your original response was: {}'.format('\n'.join(orig_response)))
    return rationale
def patch_requests():
    """Point requests at a locally managed cacert.pem.

    Writes the bundled certificates into the config directory (updating
    them only when they differ) and exports REQUESTS_CA_BUNDLE.
    """
    config.create_config_directory()
    cert_path = config.CERT_FILE
    bundled_certs = requests.__loader__.get_data('requests/cacert.pem')
    write_needed = True
    if os.path.isfile(cert_path):
        with open(cert_path, 'rb') as cert_file:
            on_disk = cert_file.read()
        if on_disk == bundled_certs:
            write_needed = False
        else:
            print("Updating local SSL certificates")
    if write_needed:
        with open(cert_path, 'wb') as cert_file:
            cert_file.write(bundled_certs)
    os.environ['REQUESTS_CA_BUNDLE'] = cert_path
def run(self, messages):
    """Collect the complete contents of all source files.

    Source files are those listed in self.assignment.src. A filepath
    that is not a valid file gets empty-string contents. The mapping of
    filepath -> contents is stored in messages['file_contents'].
    """
    files = {}
    # TODO(albert): move this to AnalyticsProtocol
    if self.args.submit:
        files['submit'] = True
    for path in self.assignment.src:
        if self.is_file(path):
            contents = self.read_file(path)
            log.info('Loaded contents of {} to send to server'.format(path))
        else:
            # TODO(albert): add an error message
            contents = ''
            log.warning('File {} does not exist'.format(path))
        files[path] = contents
    messages['file_contents'] = files
def run(self, messages):
    """Lock every test in the assignment when --lock is given."""
    if not self.args.lock:
        return
    format.print_line('~')
    print('Locking tests')
    print()
    for assignment_test in self.assignment.test_map.values():
        log.info('Locking {}'.format(assignment_test.name))
        assignment_test.lock(self._hash_fn)
def pick_free_port(hostname=REDIRECT_HOST, port=0):
    """Try to bind PORT on HOSTNAME and return the bound port number.

    port=0 asks the OS for any open port. Raises AuthenticationException
    when no port can be found.
    """
    import socket
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.bind((hostname, port))  # port=0 finds an open port
    except OSError as e:
        # BUG FIX: the socket leaked on this error path; close it before
        # raising or recursing.
        s.close()
        log.warning("Could not bind to %s:%s %s", hostname, port, e)
        if port == 0:
            print('Unable to find an open port for authentication.')
            raise AuthenticationException(e)
        # The requested port was busy; fall back to an OS-chosen port.
        return pick_free_port(hostname, 0)
    addr, port = s.getsockname()
    s.close()
    return port
def make_token_post(server, data):
    """POST DATA to the server's token endpoint.

    Returns the decoded JSON body on success; raises OAuthException on
    network failure or an error response.
    """
    try:
        response = requests.post(server + TOKEN_ENDPOINT, data=data,
                                 timeout=TIMEOUT)
        body = response.json()
    except Exception as e:
        log.warning('Other error when exchanging code', exc_info=True)
        raise OAuthException(
            error='Authentication Failed',
            error_description=str(e))
    if 'error' in body:
        log.error(body)
        raise OAuthException(
            error=body.get('error', 'Unknown Error'),
            error_description=body.get('error_description', ''))
    return body
def authenticate(cmd_args, endpoint='', force=False):
    """Return an OAuth token that identifies the user to the server.

    Unless FORCE is set, first tries to refresh the cached token and
    falls back to a fresh OAuth flow.
    """
    server = server_url(cmd_args)
    network.check_ssl()
    access_token = None
    refreshed = False
    if not force:
        # BUG FIX: the original drove this with `assert not force` inside
        # the try-block; asserts are stripped under `python -O`, which
        # would silently skip forced re-authentication.
        try:
            access_token = refresh_local_token(server)
            refreshed = True
        except Exception:
            pass
    if not refreshed:
        print('Performing authentication')
        access_token = perform_oauth(get_code, cmd_args, endpoint)
    email = display_student_email(cmd_args, access_token)
    if not email:
        log.warning('Could not get login email. Try logging in again.')
    log.debug('Authenticated with access token={}'.format(access_token))
    return access_token
def notebook_authenticate(cmd_args, force=False, silent=True):
    """Similar to authenticate(), but reads codes from the terminal and
    always reports the student email afterwards.

    If SILENT is True, a refresh error triggers a forced re-login
    instead of propagating.
    """
    server = server_url(cmd_args)
    network.check_ssl()
    access_token = None
    if not force:
        try:
            access_token = refresh_local_token(server)
        except OAuthException as e:
            # Accounts for an invalid-grant error during make_token_post.
            if not silent:
                raise e
            return notebook_authenticate(cmd_args, force=True, silent=False)
    if not access_token:
        access_token = perform_oauth(
            get_code_via_terminal,
            cmd_args,
            copy_msg=NOTEBOOK_COPY_MESSAGE,
            paste_msg=NOTEBOOK_PASTE_MESSAGE)
    # Always display the email so the student knows who is logged in.
    email = display_student_email(cmd_args, access_token)
    if email is None and not force:
        # Token has expired; retry once with a forced login.
        return notebook_authenticate(cmd_args, force=True)
    elif email is None:
        # Did not get a valid token even after a fresh login.
        log.warning('Could not get login email. You may have been logged out. '
                    ' Try logging in again.')
    return access_token
def get_student_email(cmd_args, endpoint=''):
    """Return the student's email address, or None if unavailable."""
    log.info("Attempting to get student email")
    if cmd_args.local:
        return None
    access_token = authenticate(cmd_args, endpoint=endpoint, force=False)
    if not access_token:
        return None
    try:
        return get_info(cmd_args, access_token)['email']
    except IOError:
        return None
def get_identifier(cmd_args, endpoint=''):
    """Return an anonymized (MD5-hashed) student identifier."""
    student_email = get_student_email(cmd_args, endpoint)
    if not student_email:
        return "Unknown"
    return hashlib.md5(student_email.encode()).hexdigest()
def check_version(server, version, filename, timeout=SHORT_TIMEOUT):
    """Check the server for the latest OK version and update if needed.

    Returns True when OK is already current or was updated
    successfully; False on any network, validation, or write error.
    """
    address = VERSION_ENDPOINT.format(server=server)
    print('Checking for software updates...')
    log.info('Existing OK version: %s', version)
    log.info('Checking latest version from %s', address)
    try:
        resp = requests.get(address, timeout=timeout)
        resp.raise_for_status()
    except (requests.exceptions.RequestException,
            requests.exceptions.BaseHTTPError) as e:
        print('Network error when checking for updates.')
        log.warning('Network error when checking version from %s: %s', address,
                    str(e), stack_info=True)
        return False
    payload = resp.json()
    if not _validate_api_response(payload):
        print('Error while checking updates: malformed server response')
        log.info('Malformed response from %s: %s', address, resp.text)
        return False
    latest = payload['data']['results'][0]
    current_version = latest['current_version']
    if current_version == version:
        print('OK is up to date')
        return True
    download_link = latest['download_link']
    log.info('Downloading version %s from %s', current_version, download_link)
    try:
        resp = requests.get(download_link, timeout=timeout)
        resp.raise_for_status()
    except (requests.exceptions.RequestException,
            requests.exceptions.BaseHTTPError) as e:
        print('Error when downloading new version of OK')
        log.warning('Error when downloading new version of OK: %s', str(e),
                    stack_info=True)
        return False
    log.info('Writing new version to %s', filename)
    try:
        _write_zip(filename, resp.content)
    except IOError as e:
        print('Error when downloading new version of OK')
        log.warning('Error writing to %s: %s', filename, str(e))
        return False
    print('Updated to version: {}'.format(current_version))
    log.info('Successfully wrote to %s', filename)
    return True
def run(self):
    """Implement the GradedTestCase interface: load the case into the
    console and interpret it, returning whether it passed."""
    console = self.console
    console.load(self.lines, setup=self.setup, teardown=self.teardown)
    return console.interpret()
def unlock(self, unique_id_prefix, case_id, interact):
    """Unlock this CodeCase.

    unique_id_prefix -- str; prefix of a unique identifier for this
                        Case, for purposes of analytics.
    case_id          -- str; identifier for this Case, for analytics.
    interact         -- function; handles user interaction during the
                        unlocking phase.
    """
    print(self.setup.strip())
    prompt_number = 0
    prompt_lines = []
    try:
        for line in self.lines:
            if isinstance(line, str) and line:
                print(line)
                prompt_lines.append(line)
            elif isinstance(line, CodeAnswer):
                prompt_number += 1
                if not line.locked:
                    print('\n'.join(line.output))
                    continue
                unique_id = self._construct_unique_id(unique_id_prefix, self.lines)
                line.output = interact(unique_id,
                                       case_id + ' > Prompt {}'.format(prompt_number),
                                       '\n'.join(prompt_lines),
                                       line.output, line.choices)
                line.locked = False
                prompt_lines = []
        self.locked = False
    finally:
        # Persist whatever was unlocked, even on interrupt.
        self._sync_code()
def split_code(cls, code, PS1, PS2):
    """Split CODE into prompt lines and expected-output answers.

    code -- str; interpretable code using PS1 and PS2 prompts
    PS1  -- str; first-level prompt symbol
    PS2  -- str; second-level prompt symbol

    Returns a list of strings (prompts/blank lines) and CodeAnswer
    objects (expected output).
    """
    processed = []
    for line in textwrap.dedent(code).splitlines():
        if not line or line.startswith(PS1) or line.startswith(PS2):
            processed.append(line)
            continue
        # An output line must follow at least one prompt line.
        assert processed, 'code improperly formatted: {}'.format(code)
        if not isinstance(processed[-1], CodeAnswer):
            processed.append(CodeAnswer())
        processed[-1].update(line)
    return processed
new_code = []
for line in self.lines:
if isinstance(line, CodeAnswer):
new_code.append(line.dump())
else:
new_code.append(line)
self.code = '\n'.join(new_code) | def _sync_code(self) | Syncs the current state of self.lines with self.code, the
serializable string representing the set of code. | 3.256685 | 2.640052 | 1.233568 |
text = []
for line in lines:
if isinstance(line, str):
text.append(line)
elif isinstance(line, CodeAnswer):
text.append(line.dump())
return id_prefix + '\n' + '\n'.join(text) | def _construct_unique_id(self, id_prefix, lines) | Constructs a unique ID for a particular prompt in this case,
based on the id_prefix and the lines in the prompt. | 3.454576 | 3.303649 | 1.045685 |
def load(self, code, setup='', teardown=''):
    """Prepare a set of setup, test, and teardown code to be run in the
    console.

    code     -- list; lines of code (strings or CodeAnswer objects)
    setup    -- str; raw setup code
    teardown -- str; raw teardown code
    """
    self._setup = textwrap.dedent(setup).splitlines()
    self._teardown = textwrap.dedent(teardown).splitlines()
    self._code = code
def interpret(self):
    """Interpret the loaded code in the console.

    Returns True if the code passes, False otherwise.
    """
    if not self._interpret_lines(self._setup):
        return False
    passed = self._interpret_lines(self._code, compare_all=True)
    passed &= self._interpret_lines(self._teardown)
    return passed
def _interpret_lines(self, lines, compare_all=False):
    """Interpret a set of lines, comparing expected output where given.

    lines       -- list of str (input) and CodeAnswer (expected output)
    compare_all -- bool; if True, also require no output for prompts
                   that are not followed by a CodeAnswer

    Returns True on success, False otherwise.
    """
    buffered = []
    # A trailing sentinel flushes the final prompt.
    for line in lines + ['']:
        if isinstance(line, str):
            if buffered and (line.startswith(self.PS1) or not line):
                # The previous prompt ends at a new PS1 or a blank line.
                try:
                    if compare_all:
                        self._compare(CodeAnswer(), '\n'.join(buffered))
                    else:
                        self.evaluate('\n'.join(buffered))
                except ConsoleException:
                    return False
                buffered = []
            if line:
                print(line)
                buffered.append(self._strip_prompt(line))
        elif isinstance(line, CodeAnswer):
            assert len(buffered) > 0, 'Answer without a prompt'
            try:
                self._compare(line, '\n'.join(buffered))
            except ConsoleException:
                return False
            buffered = []
    return True
def dump(self):
    """Serialize this answer to its string (test-file) form."""
    lines = list(self.output_lines())
    if self.locked:
        lines.append('# locked')
    for choice in (self.choices or []):
        lines.append('# choice: ' + choice)
    if self.explanation:
        lines.append('# explanation: ' + self.explanation)
    return '\n'.join(lines)
def output_lines(self):
    """Return a sequence of lines suitable for printing or comparing
    answers: the exception form if one occurred, else the raw output."""
    if not self.exception:
        return self.output
    return [self.EXCEPTION_HEADERS[0], ' ...'] + self.exception_detail
def prettyjson(json, indentation='  '):
    """Format a Python object into a JSON-like string, using raw
    triple-quoted strings for multiline strings.

    json        -- JSON-serializable Python object
    indentation -- str; represents one level of indentation

    Returns the formatted string; raises SerializeException for
    non-serializable types.
    """
    if isinstance(json, (int, float)):
        return str(json)
    elif isinstance(json, str):
        if '\n' in json:
            # BUG FIX: this branch returned the bare string 'r'; emit
            # the raw triple-quoted form the contract promises.
            return 'r"""\n' + json + '\n"""'
        return repr(json)
    elif isinstance(json, list):
        elements = [indent(prettyjson(el, indentation), indentation)
                    for el in json]
        return '[\n' + ',\n'.join(elements) + '\n]'
    elif isinstance(json, dict):
        pairs = []
        for k, v in sorted(json.items()):
            pair = prettyjson(k, indentation) + ': ' + prettyjson(v, indentation)
            pairs.append(indent(pair, indentation))
        return '{\n' + ',\n'.join(pairs) + '\n}'
    else:
        raise exceptions.SerializeException('Invalid json type: {}'.format(json))
def validate_contents(file_contents):
    """Return True iff every .ipynb entry in FILE_CONTENTS holds
    non-empty, valid JSON. Non-notebook entries are ignored."""
    for name, contents in file_contents.items():
        if os.path.splitext(name)[1] != '.ipynb':
            continue
        if not contents:
            return False
        try:
            # The parsed object was bound to an unused local; only the
            # parse success matters here.
            json.loads(contents)
        except ValueError:
            return False
    return True
def wait_for_save(filename, timeout=5):
    """Wait up to TIMEOUT seconds for FILENAME to be modified.

    Returns True if a non-empty save was detected, False otherwise.
    """
    baseline_mtime = os.path.getmtime(filename)
    deadline = time.time() + timeout
    while time.time() < deadline:
        if (os.path.getmtime(filename) > baseline_mtime
                and os.path.getsize(filename) > 0):
            return True
        time.sleep(0.2)
    return False
def score(self, env=None, score_out=None):
    """Run the scoring protocol and return the score breakdown.

    env       -- dict; environment for Python tests (defaults to
                 __main__'s globals)
    score_out -- str; optional filename to write the point breakdown to

    Returns dict mapping score tag (str) -> points (float).
    """
    messages = {}
    self.assignment.set_args(score=True, score_out=score_out)
    if env is None:
        import __main__
        env = __main__.__dict__
    self.run('scoring', messages, env=env)
    return messages['scoring']
def save_notebook(self):
    """Save the current notebook by injecting JavaScript, then wait for
    the first .ipynb source file to hit disk.

    Returns True on success or when there is nothing to wait for;
    False when the save times out; None if IPython is unavailable.
    """
    try:
        from IPython.display import display, Javascript
    except ImportError:
        log.warning("Could not import IPython Display Function")
        print("Make sure to save your notebook before sending it to OK!")
        return
    if self.mode == "jupyter":
        display(Javascript('IPython.notebook.save_checkpoint();'))
        display(Javascript('IPython.notebook.save_notebook();'))
    elif self.mode == "jupyterlab":
        display(Javascript('document.querySelector(\'[data-command="docmanager:save"]\').click();'))
    print('Saving notebook...', end=' ')
    notebooks = [path for path in self.assignment.src
                 if os.path.splitext(path)[1] == '.ipynb']
    if not notebooks:
        print("No valid file sources found")
        return True
    # Wait only for the first notebook listed.
    target = notebooks[0]
    if wait_for_save(target):
        print("Saved '{}'.".format(target))
    else:
        log.warning("Timed out waiting for IPython save")
        print("Could not automatically save \'{}\'".format(target))
        print("Make sure your notebook"
              " is correctly named and saved before submitting to OK!".format(target))
        return False
    return True
def parse_input():
    """Parse command line input."""
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('-c', '--config', type=str,
                        help="Specify a configuration file")
    return parser.parse_args()
def main():
    """Run the LockingProtocol over the configured assignment."""
    args = parse_input()
    # Force the flags that the locking protocol expects.
    args.lock = True
    args.question = []
    args.all = False
    args.timeout = 0
    args.verbose = False
    args.interactive = False
    try:
        assign = assignment.load_assignment(args.config, args)
        msgs = messages.Messages()
        lock.protocol(args, assign).run(msgs)
    except (ex.LoadingException, ex.SerializeException) as e:
        log.warning('Assignment could not instantiate', exc_info=True)
        print('Error: ' + str(e).strip())
        exit(1)
    except (KeyboardInterrupt, EOFError):
        log.info('Quitting...')
    else:
        assign.dump_tests()
def write_tree(zipf, src_directory, dst_directory):
    """Copy every .py and .pem file under SRC_DIRECTORY into the zip
    archive ZIPF, rerooted at DST_DIRECTORY."""
    if not os.path.exists(src_directory):
        abort('Tree ' + src_directory + ' does not exist.')
    for root, _, files in os.walk(src_directory):
        for basename in files:
            if basename.endswith(('.py', '.pem')):
                fullname = os.path.join(root, basename)
                zipf.write(fullname,
                           arcname=fullname.replace(src_directory, dst_directory))
def new_log(self):
    """Register a new, empty log and return its unique integer ID.

    Subsequent calls to write() append to every registered log.
    """
    log_id = self._num_logs
    self._num_logs = log_id + 1
    self._logs[log_id] = []
    return log_id
def write(self, msg):
    """Write MSG (str) to the current output stream and append it to
    every registered log."""
    self._current_stream.write(msg)
    for entries in self._logs.values():
        entries.append(msg)
def canonicalize(assess_id):
    """Canonicalize an assessment/question ID so it is stable across
    iterations of a course.

    Drops the preamble before the first recognized prompt, strips
    comments, replaces 32-char answer hashes (which vary by semester)
    with a constant, and removes blank lines.
    """
    hash_regex = re.compile(r'\w{32}')
    canon_lines = []
    parsing_code = False
    comment_character = None
    for raw_line in assess_id.split('\n'):
        line = raw_line.strip()
        if not parsing_code and line:
            # Code starts at the first line beginning with a known prompt.
            for prompt in DICT_PROMPT_TO_CHARACTER:
                if line.startswith(prompt):
                    parsing_code = True
                    comment_character = DICT_PROMPT_TO_CHARACTER[prompt]
                    break
        # Until a prompt is seen we are still in the preamble; skip.
        if parsing_code:
            # Remove any trailing comment.
            comment_index = line.find(comment_character)
            if comment_index >= 0:
                line = line[0:comment_index].strip()
            # Hashed answers vary by semester; replace with a constant.
            if hash_regex.match(line):
                line = 'LOCKED_ANSWER'
            # '# locked' markers appear regardless of language.
            if line == '# locked':
                line = ''
            if line:
                canon_lines.append(line)
    return '\n'.join(canon_lines) + '\n'
def run(self, messages):
    """Determine if a student is eligible to receive a hint and, if so,
    query the hint server and display the result.

    Eligibility decisions, server responses and reflection answers are
    recorded under messages['hinting'].
    """
    if self.args.local:
        return
    # Only run hinting protocol on supported assignments.
    if self.assignment.endpoint not in self.SUPPORTED_ASSIGNMENTS:
        message = "{0} does not support hinting".format(self.assignment.endpoint)
        log.info(message)
        if self.args.hint:
            print(message)
        return
    if 'analytics' not in messages:
        log.info('Analytics Protocol is required for hint generation')
        return
    if 'file_contents' not in messages:
        log.info('File Contents needed to generate hints')
        return
    if self.args.no_experiments:
        messages['hinting'] = {'disabled': 'user'}
        return
    messages['hinting'] = {}
    history = messages['analytics'].get('history', {})
    questions = history.get('questions', [])
    current_q = history.get('question', {})
    messages['hinting']['flagged'] = self.args.hint
    for question in current_q:
        if question not in questions:
            continue
        stats = questions[question]
        is_solved = stats['solved'] == True
        messages['hinting'][question] = {'prompts': {}, 'reflection': {}}
        hint_info = messages['hinting'][question]
        # Determine the user's eligibility for a prompt.
        if is_solved:
            hint_info['elgible'] = False
            hint_info['disabled'] = 'solved'
            if self.args.hint:
                print("This question has already been solved.")
            continue
        elif stats['attempts'] < self.SMALL_EFFORT:
            log.info("Question %s is not elgible: Attempts: %s, Solved: %s",
                     question, stats['attempts'], is_solved)
            hint_info['elgible'] = False
            if self.args.hint:
                hint_info['disabled'] = 'attempt-count'
                print("You need to make a few more attempts before the hint system is enabled")
            continue
        else:
            # Only prompt every WAIT_ATTEMPTS attempts to avoid annoying the user.
            if stats['attempts'] % self.WAIT_ATTEMPTS != 0:
                hint_info['disabled'] = 'timer'
                hint_info['elgible'] = False
                log.info('Waiting for %d more attempts before prompting',
                         stats['attempts'] % self.WAIT_ATTEMPTS)
            else:
                hint_info['elgible'] = not is_solved
        if not self.args.hint:
            if hint_info['elgible']:
                with format.block("-"):
                    print("To get hints, try using python3 ok --hint -q {}".format(question))
                hint_info['suggested'] = True
            continue
        hint_info['accept'] = True
        with format.block("-"):
            print(("Thinking of a hint for {}".format(question) +
                   "... (This could take up to 30 seconds)"))
            pre_hint = random.choice(PRE_HINT_MESSAGES)
            print("In the meantime, consider: \n{}".format(pre_hint))
            hint_info['pre-prompt'] = pre_hint
            log.info('Prompting for hint on %s', question)
            try:
                response = self.query_server(messages, question)
            except (requests.exceptions.RequestException, requests.exceptions.BaseHTTPError):
                log.debug("Network error while fetching hint", exc_info=True)
                hint_info['fetch_error'] = True
                print("\r\nNetwork Error while generating hint. Try again later")
                continue
            if response:
                hint_info['response'] = response
                hint = response.get('message')
                post_prompt = response.get('post-prompt')
                system_error = response.get('system-error')
                log.info("Hint server response: {}".format(response))
                if not hint:
                    if system_error:
                        print("{}".format(system_error))
                    else:
                        print("Sorry. No hints found for the current code. Try again making after some changes")
                    continue
                # Provide padding for the hint.
                print("\n{}".format(hint.rstrip()))
                if post_prompt:
                    # BUG FIX: this line referenced the undefined names
                    # `results` and `query` (NameError at runtime);
                    # record the reflection answer on this question's
                    # hint_info instead. Also dropped the unused
                    # `pre_prompt` local that shadowed nothing.
                    hint_info['prompts'][question] = prompt.explanation_msg(post_prompt)
def display_breakdown(scores, outfile=None):
    """Write the point breakdown for SCORES.

    scores  -- dict; maps name -> (score, max_score)
    outfile -- str or None; filename to write to (stdout if None)

    Returns {'Total': <total score (float)>}.
    """
    total = 0
    # BUG FIX: the parameter was shadowed by the opened file object,
    # which was then never closed; keep them separate and close the
    # file when we opened it ourselves.
    out = open(outfile, 'w') if outfile else sys.stdout
    try:
        format.print_line('-')
        print('Point breakdown', file=out)
        for name, (score, max_score) in scores.items():
            print(' {}: {}/{}'.format(name, score, max_score), file=out)
            total += score
        print(file=out)
        print('Score:', file=out)
        print(' Total: {}'.format(total), file=out)
    finally:
        if out is not sys.stdout:
            out.close()
    return {'Total': total}
def run(self, messages, env=None):
    """Score tests and print results.

    Tests are taken from self.assignment.specified_tests; the breakdown
    by question and the total are printed and saved to
    messages['scoring'].

    env -- dict; used by the programmatic API for Python doctests only.
    """
    if not self.args.score or self.args.testing:
        return
    format.print_line('~')
    print('Scoring tests')
    print()
    raw_scores = OrderedDict()
    for test in self.assignment.specified_tests:
        assert isinstance(test, sources_models.Test), 'ScoringProtocol received invalid test'
        log.info('Scoring test {}'.format(test.name))
        # A hack that lets programmatic API users plumb a custom
        # environment through to Python tests; type() ensures this is an
        # actual OkTest and not a subclass.
        if type(test) == ok_test_models.OkTest:
            score = test.score(env=env)
        else:
            score = test.score()
        raw_scores[test.name] = (score, test.points)
    messages['scoring'] = display_breakdown(raw_scores, self.args.score_out)
    print()
def lock(key, text):
    """Lock TEXT using KEY; return the hex HMAC digest.

    BUG FIX: hmac.new() without an explicit digestmod raises TypeError
    on Python 3.8+. MD5 was the implicit default that produced the
    existing locked answers, so pin it explicitly to stay compatible.
    """
    return hmac.new(key.encode('utf-8'), text.encode('utf-8'),
                    digestmod='md5').hexdigest()
def run(self, messages):
    """Unlock each of the assignment's specified tests.

    Unlocking can be aborted by KeyboardInterrupt or EOFError. Per-test
    analytics are recorded in messages['unlock'].
    """
    if not self.args.unlock:
        return
    format.print_line('~')
    print('Unlocking tests')
    print()
    print('At each "{}", type what you would expect the output to be.'.format(
        self.PROMPT))
    print('Type {} to quit'.format(self.EXIT_INPUTS[0]))
    print()
    for test in self.assignment.specified_tests:
        log.info('Unlocking test {}'.format(test.name))
        self.current_test = test.name
        # Reset guidance explanation probability for every question.
        self.guidance_util.prompt_probability = guidance.DEFAULT_PROMPT_PROBABILITY
        try:
            test.unlock(self.interact)
        except (KeyboardInterrupt, EOFError):
            try:
                # TODO(albert): Ctrl+C on Windows throws two exceptions,
                # so both must be caught here. Find a cleaner fix.
                print()
                print('-- Exiting unlocker --')
            except (KeyboardInterrupt, EOFError):
                pass
            print()
            break
    messages['unlock'] = self.analytics
def interact(self, unique_id, case_id, question_prompt, answer, choices=None, randomize=True):
    """Read student input for unlocking tests until the student answers
    correctly.

    unique_id       -- str; ID recorded with this unlocking attempt.
    case_id         -- str; ID recorded with this unlocking attempt.
    question_prompt -- str; the question prompt.
    answer          -- list; locked lines of a test case answer.
    choices         -- list or None; multiple-choice options, if any.
    randomize       -- bool; if True, randomize the choices on first
                       invocation.
    """
    if randomize and choices:
        choices = random.sample(choices, len(choices))
    correct = False
    while not correct:
        if choices:
            assert len(answer) == 1, 'Choices must have 1 line of output'
            choice_map = self._display_choices(choices)
        question_timestamp = datetime.now()
        input_lines = []
        for line_number, line in enumerate(answer):
            if len(answer) == 1:
                prompt = self.PROMPT
            else:
                prompt = '(line {}){}'.format(line_number + 1, self.PROMPT)
            student_input = format.normalize(self._input(prompt))
            self._add_history(student_input)
            if student_input in self.EXIT_INPUTS:
                raise EOFError
            if choices and student_input in choice_map:
                student_input = choice_map[student_input]
            correct_answer = self._verify_student_input(student_input, line)
            if correct_answer:
                input_lines.append(correct_answer)
            else:
                input_lines.append(student_input)
                break
        else:
            # Every line verified: the student answered correctly.
            correct = True
        tg_id = -1
        misU_count_dict = {}
        rationale = "Unknown - Default Value"
        if not correct:
            guidance_data = self.guidance_util.show_guidance_msg(unique_id, input_lines,
                                                                 self.hash_key)
            misU_count_dict, tg_id, printed_msg, rationale = guidance_data
        else:
            rationale = self.guidance_util.prompt_with_prob()
            print("-- OK! --")
            printed_msg = ["-- OK! --"]
        self.analytics.append({
            'id': unique_id,
            'case_id': case_id,
            'question timestamp': self.unix_time(question_timestamp),
            'answer timestamp': self.unix_time(datetime.now()),
            'prompt': question_prompt,
            'answer': input_lines,
            'correct': correct,
            'treatment group id': tg_id,
            'rationale': rationale,
            'misU count': misU_count_dict,
            'printed msg': printed_msg
        })
        print()
    return input_lines
DESCRIPTION:
Continually prompt the student for an answer to an unlocking
question until one of the folliwng happens:
1. The student supplies the correct answer, in which case
the supplied answer is returned
2. The student aborts abnormally (either by typing 'exit()'
or using Ctrl-C/D. In this case, return None
Correctness is determined by the verify method.
RETURNS:
list; the correct solution (that the student supplied). Each element
in the list is a line of the correct output. | 4.140405 | 4.230194 | 0.978774 |
guesses = [student_input]
try:
guesses.append(repr(ast.literal_eval(student_input)))
except Exception:
pass
if student_input.title() in self.SPECIAL_INPUTS:
guesses.append(student_input.title())
for guess in guesses:
if self._verify(guess, locked):
return guess | def _verify_student_input(self, student_input, locked) | If the student's answer is correct, returns the normalized answer.
Otherwise, returns None. | 3.724778 | 3.542315 | 1.051509 |
print("Choose the number of the correct choice:")
choice_map = {}
for i, choice in enumerate(choices):
i = str(i)
print('{}) {}'.format(i, format.indent(choice,
' ' * (len(i) + 2)).strip()))
choice = format.normalize(choice)
choice_map[i] = choice
return choice_map | def _display_choices(self, choices) | Prints a mapping of numbers to choices and returns the
mapping as a dictionary. | 4.710119 | 4.375559 | 1.076461 |
epoch = datetime.utcfromtimestamp(0)
delta = dt - epoch
return int(delta.total_seconds()) | def unix_time(self, dt) | Returns the number of seconds since the UNIX epoch for the given
datetime (dt).
PARAMETERS:
dt -- datetime | 2.595171 | 4.552588 | 0.570043 |
if timeout == 0:
return fn(*args, **kargs)
submission = __ReturningThread(fn, args, kargs)
submission.start()
submission.join(timeout)
if submission.is_alive():
raise exceptions.Timeout(timeout)
if submission.error is not None:
raise submission.error
return submission.result | def timed(timeout, fn, args=(), kargs={}) | For a nonzero timeout, evaluates a call expression in a separate thread.
If the timeout is 0, the expression is evaluated in the main thread.
PARAMETERS:
fn -- function; Python function to be evaluated
args -- tuple; positional arguments for fn
kargs -- dict; keyword arguments for fn
timeout -- int; number of seconds before timer interrupt
RETURN:
Result of calling fn(*args, **kargs).
RAISES:
Timeout -- if thread takes longer than timeout to execute
Error -- if calling fn raises an error, raise it | 3.084637 | 3.467734 | 0.889525 |
is_submit = current and self.args.submit and not self.args.revise
is_revision = current and self.args.revise
data = {
'assignment': self.assignment.endpoint,
'messages': messages,
'submit': is_submit
}
if is_revision:
address = self.REVISION_ENDPOINT.format(server=self.assignment.server_url)
else:
address = self.BACKUP_ENDPOINT.format(server=self.assignment.server_url)
address_params = {
'client_name': 'ok-client',
'client_version': client.__version__,
}
headers = {'Authorization': 'Bearer {}'.format(access_token)}
log.info('Sending messages to %s', address)
response = requests.post(address, headers=headers,
params=address_params, json=data, timeout=timeout)
response.raise_for_status()
return response.json() | def send_messages(self, access_token, messages, timeout, current) | Send messages to server, along with user authentication. | 3.493593 | 3.433219 | 1.017585 |
parser = argparse.ArgumentParser(
prog='python3 ok',
description=__doc__,
usage='%(prog)s [--help] [options]',
formatter_class=argparse.RawDescriptionHelpFormatter)
testing = parser.add_argument_group('running tests')
testing.add_argument('-q', '--question', type=str, action='append',
help="run tests for a specific question")
testing.add_argument('--suite', type=str, default=None,
help="run cases from a specific suite")
testing.add_argument('--case', type=str, action='append',
help="run specific cases")
testing.add_argument('-u', '--unlock', action='store_true',
help="unlock tests interactively")
testing.add_argument('-i', '--interactive', action='store_true',
help="start the Python interpreter after a failed test")
testing.add_argument('-v', '--verbose', action='store_true',
help="show all tests, not just passing tests")
testing.add_argument('-t', '--testing', nargs='?', type=str, const='mytests.rst',
help='run tests from rst file (default: mytests.rst)')
testing.add_argument('--all', action='store_true',
help="run tests for all questions in config file")
testing.add_argument('--submit', action='store_true',
help="submit the assignment")
testing.add_argument('--backup', action='store_true',
help="attempt to reliably backup your work")
testing.add_argument('--revise', action='store_true',
help="submit composition revision")
testing.add_argument('--timeout', type=int, default=10,
help="set the timeout duration (in seconds) for running tests")
testing.add_argument('-cov', '--coverage', action='store_true',
help="get suggestions on what lines to add tests for")
# Experiments
experiment = parser.add_argument_group('experiment options')
experiment.add_argument('--no-experiments', action='store_true',
help="do not run experimental features")
experiment.add_argument('--hint', action='store_true',
help="give a hint (if available)")
experiment.add_argument('--style', action='store_true',
help="run AutoStyle feedback system")
experiment.add_argument('--collab', action='store_true',
help="launch collaborative programming environment")
# Debug information
debug = parser.add_argument_group('debugging options')
debug.add_argument('--version', action='store_true',
help="print the version number and exit")
debug.add_argument('--tests', action='store_true',
help="display a list of all available tests")
debug.add_argument('--debug', action='store_true',
help="show debugging output")
# Grading
grading = parser.add_argument_group('grading options')
grading.add_argument('--lock', action='store_true',
help="lock the tests in a directory")
grading.add_argument('--score', action='store_true',
help="score the assignment")
grading.add_argument('--score-out', type=str,
nargs='?', const=None, default=None,
help="write scores to a file")
grading.add_argument('--config', type=str,
help="use a specific configuration file")
# Server parameters
server = parser.add_argument_group('server options')
server.add_argument('--local', action='store_true',
help="disable any network activity")
server.add_argument('--server', type=str,
default='okpy.org',
help="set the server address")
server.add_argument('--authenticate', action='store_true',
help="authenticate, ignoring previous authentication")
server.add_argument('--no-browser', action='store_true',
help="do not use a web browser for authentication")
server.add_argument('--get-token', action='store_true',
help="get ok access token")
server.add_argument('--insecure', action='store_true',
help="use http instead of https")
server.add_argument('--no-update', action='store_true',
help="do not check for ok updates")
server.add_argument('--update', action='store_true',
help="update ok and exit")
return parser.parse_args(command_input) | def parse_input(command_input=None) | Parses command line input. | 2.688277 | 2.678085 | 1.003806 |
args = parse_input()
log.setLevel(logging.DEBUG if args.debug else logging.ERROR)
log.debug(args)
# Checking user's Python bit version
bit_v = (8 * struct.calcsize("P"))
log.debug("Python {} ({}bit)".format(sys.version, bit_v))
if args.version:
print("okpy=={}".format(client.__version__))
exit(0)
elif args.update:
print("Current version: {}".format(client.__version__))
did_update = software_update.check_version(
args.server, client.__version__, client.FILE_NAME, timeout=10)
exit(not did_update) # exit with error if ok failed to update
assign = None
try:
if args.get_token:
access_token = auth.authenticate(args, force=True)
print("Token: {}".format(access_token))
exit(not access_token) # exit with error if no access_token
# Instantiating assignment
assign = assignment.load_assignment(args.config, args)
if args.tests:
print('Available tests:')
for name in assign.test_map:
print(' ' + name)
exit(0)
force_authenticate = args.authenticate
retry = True
while retry:
retry = False
if force_authenticate:
# Authenticate and check for success
if not assign.authenticate(force=True):
exit(1)
try:
msgs = messages.Messages()
for name, proto in assign.protocol_map.items():
log.info('Execute {}.run()'.format(name))
proto.run(msgs)
msgs['timestamp'] = str(datetime.now())
except ex.AuthenticationException as e:
if not force_authenticate:
force_authenticate = True
retry = True
elif not args.no_browser:
args.no_browser = True
retry = True
if retry:
msg = "without a browser" if args.no_browser else "with a browser"
log.warning('Authentication exception occurred; will retry {0}'.format(msg), exc_info=True)
print('Authentication error; will try to re-authenticate {0}...'.format(msg))
else:
raise # outer handler will be called
except ex.LoadingException as e:
log.warning('Assignment could not load', exc_info=True)
print('Error loading assignment: ' + str(e))
except ex.AuthenticationException as e:
log.warning('Authentication exception occurred', exc_info=True)
print('Authentication error: {0}'.format(e))
except ex.EarlyExit as e:
log.warning('OK exited early (non-error)')
print(str(e))
except ex.OkException as e:
log.warning('General OK exception occurred', exc_info=True)
print('Error: ' + str(e))
except KeyboardInterrupt:
log.info('KeyboardInterrupt received.')
finally:
if not args.no_update and not args.local:
try:
software_update.check_version(args.server, client.__version__,
client.FILE_NAME)
except KeyboardInterrupt:
pass
if assign:
assign.dump_tests() | def main() | Run all relevant aspects of ok.py. | 3.961001 | 3.863128 | 1.025335 |
if env is None:
import __main__
env = __main__.__dict__
messages = {}
tests = self._resolve_specified_tests([question], all_tests=False)
for test in tests:
try:
for suite in test.suites:
suite.skip_locked_cases = skip_locked_cases
suite.console.skip_locked_cases = skip_locked_cases
suite.console.hash_key = self.name
except AttributeError:
pass
test_name = tests[0].name
grade(tests, messages, env)
return messages['grading'][test_name] | def grade(self, question, env=None, skip_locked_cases=False) | Runs tests for a particular question. The setup and teardown will
always be executed.
question -- str; a question name (as would be entered at the command
line
env -- dict; an environment in which to execute the tests. If
None, uses the environment of __main__. The original
dictionary is never modified; each test is given a
duplicate of env.
skip_locked_cases -- bool; if False, locked cases will be tested
Returns: dict; maps question names (str) -> results (dict). The
results dictionary contains the following fields:
- "passed": int (number of test cases passed)
- "failed": int (number of test cases failed)
- "locked": int (number of test cases locked) | 4.704676 | 5.21891 | 0.901467 |
statistics = {}
statistics['time'] = str(datetime.now())
statistics['time-utc'] = str(datetime.utcnow())
statistics['unlock'] = self.args.unlock
if self.args.question:
statistics['question'] = [t.name for t in self.assignment.specified_tests]
statistics['requested-questions'] = self.args.question
if self.args.suite:
statistics['requested-suite'] = self.args.suite
if self.args.case:
statistics['requested-case'] = self.args.case
messages['analytics'] = statistics
self.log_run(messages) | def run(self, messages) | Returns some analytics about this autograder run. | 4.721349 | 3.978577 | 1.186693 |
line_num = len(contents.strip(' ').splitlines())
replace_marks = self.RE_REPLACE_MARK.findall(contents.strip())
if len(replace_marks) == line_num:
return False
return True | def replaced(self, contents) | For a question snippet containing some default code, return True if the
default code is replaced. Default code in a snippet should have
'\# Replace with your solution' at the end of each line. | 5.888922 | 5.265236 | 1.118454 |
# Load the contents of the local analytics file
history = self.read_history()
history['all_attempts'] += 1
# List of question names that the student asked to have graded
questions = messages['analytics'].get('question', [])
# The output of the grading protocol
grading = messages.get('grading')
# Attempt to figure out what the student is currently implementing
if not questions and grading:
# If questions are unspecified by the user, use the first failed test
failed = first_failed_test(self.assignment.specified_tests, grading)
logging.info('First failed test: {}'.format(failed))
if failed:
questions = [failed]
# Update question correctness status from previous attempts
for saved_q, details in history['questions'].items():
finished = details['solved']
if not finished and saved_q in grading:
scoring = grading[saved_q]
details['solved'] = is_correct(scoring)
# The question(s) that the student is testing right now.
history['question'] = questions
# Update attempt and correctness counts for the graded questions
for question in questions:
detail = history['questions']
if grading and question in grading:
scoring = is_correct(grading[question])
else:
scoring = False
# Update attempt counts or initialize counts
if question in history['questions']:
q_info = detail[question]
if grading and question in grading:
if q_info['solved'] != True:
q_info['solved'] = scoring
else:
continue # Already solved. Do not change total
q_info['attempts'] += 1
else:
detail[question] = {
'attempts': 1,
'solved': scoring
}
logging.info('Attempt %d for Question %s : %r',
history['questions'], question, scoring)
with open(self.ANALYTICS_FILE, 'wb') as f:
log.info('Saving history to %s', self.ANALYTICS_FILE)
pickle.dump(history, f)
os.fsync(f)
messages['analytics']['history'] = history | def log_run(self, messages) | Record this run of the autograder to a local file.
If the student does not specify what question(s) the student is
running ok against, assume that the student is aiming to work on
the question with the first failed test. If a student finishes
questions 1 - N-1, the first test to fail will be N. | 4.925526 | 4.567649 | 1.07835 |
try:
import ssl
except:
log.warning('Error importing SSL module', stack_info=True)
print(SSL_ERROR_MESSAGE)
sys.exit(1)
else:
log.info('SSL module is available')
return ssl | def check_ssl() | Attempts to import SSL or raises an exception. | 4.525827 | 4.011125 | 1.128319 |
if self.args.score or self.args.unlock or self.args.testing:
return
tests = self.assignment.specified_tests
for test in tests:
if self.args.suite and hasattr(test, 'suites'):
test.run_only = int(self.args.suite)
try:
suite = test.suites[int(self.args.suite) - 1]
except IndexError as e:
sys.exit(('python3 ok: error: '
'Suite number must be valid.({})'.format(len(test.suites))))
if self.args.case:
suite.run_only = [int(c) for c in self.args.case]
grade(tests, messages, env, verbose=self.args.verbose) | def run(self, messages, env=None) | Run gradeable tests and print results and return analytics.
RETURNS:
dict; a mapping of test name -> JSON-serializable object. It is up to
each test to determine what kind of data it wants to return as
significant for analytics. However, all tests must include the number
passed, the number of locked tests and the number of failed tests. | 5.851301 | 5.788573 | 1.010837 |
if not self.is_valid(value):
raise ex.SerializeException('{} is not a valid value for '
'type {}'.format(value, self.__class__.__name__))
return value | def coerce(self, value) | Subclasses should override this method for type coercion.
Default version will simply return the argument. If the argument
is not valid, a SerializeException is raised.
For primitives like booleans, ints, floats, and strings, use
this default version to avoid unintended type conversions. | 5.211858 | 4.051226 | 1.286489 |
if not self.is_valid(value):
raise ex.SerializeException('Invalid value: {}'.format(value))
return value | def to_json(self, value) | Subclasses should override this method for JSON encoding. | 6.668839 | 5.878847 | 1.134379 |
if response_headers['content-encoding'] == 'gzip':
buf = StringIO.StringIO(response_data)
zipbuf = gzip.GzipFile(fileobj=buf)
response_data = zipbuf.read()
elif response_headers['content-encoding'] == 'deflate':
data = StringIO.StringIO(zlib.decompress(response_data))
response_data = data.read()
else:
raise errors.TestError(
'Received unknown Content-Encoding',
{
'content-encoding':
str(response_headers['content-encoding']),
'function': 'http.HttpResponse.parse_content_encoding'
})
return response_data | def parse_content_encoding(self, response_headers, response_data) | Parses a response that contains Content-Encoding to retrieve
response_data | 2.607895 | 2.593892 | 1.005399 |
split_response = self.response.split(self.CRLF)
response_line = split_response[0]
response_headers = {}
response_data = None
data_line = None
for line_num in range(1, len(split_response[1:])):
# CRLF represents the start of data
if split_response[line_num] == '':
data_line = line_num + 1
break
else:
# Headers are all split by ':'
header = split_response[line_num].split(':', 1)
if len(header) != 2:
raise errors.TestError(
'Did not receive a response with valid headers',
{
'header_rcvd': str(header),
'function': 'http.HttpResponse.process_response'
})
response_headers[header[0].lower()] = header[1].lstrip()
if 'set-cookie' in response_headers.keys():
try:
cookie = Cookie.SimpleCookie()
cookie.load(response_headers['set-cookie'])
except Cookie.CookieError as err:
raise errors.TestError(
'Error processing the cookie content into a SimpleCookie',
{
'msg': str(err),
'set_cookie': str(response_headers['set-cookie']),
'function': 'http.HttpResponse.process_response'
})
# if the check_for_cookie is invalid then we don't save it
if self.check_for_cookie(cookie) is False:
raise errors.TestError(
'An invalid cookie was specified',
{
'set_cookie': str(response_headers['set-cookie']),
'function': 'http.HttpResponse.process_response'
})
else:
self.cookiejar.append((cookie, self.dest_addr))
if data_line is not None and data_line < len(split_response):
response_data = self.CRLF.join(split_response[data_line:])
# if the output headers say there is encoding
if 'content-encoding' in response_headers.keys():
response_data = self.parse_content_encoding(
response_headers, response_data)
if len(response_line.split(' ', 2)) != 3:
raise errors.TestError(
'The HTTP response line returned the wrong args',
{
'response_line': str(response_line),
'function': 'http.HttpResponse.process_response'
})
try:
self.status = int(response_line.split(' ', 2)[1])
except ValueError:
raise errors.TestError(
'The status num of the response line isn\'t convertable',
{
'msg': 'This may be an HTTP 1.0 \'Simple Req\\Res\', it \
doesn\'t have HTTP headers and FTW will not parse these',
'response_line': str(response_line),
'function': 'http.HttpResponse.process_response'
})
self.status_msg = response_line.split(' ', 2)[2]
self.version = response_line.split(' ', 2)[0]
self.response_line = response_line
self.headers = response_headers
self.data = response_data | def process_response(self) | Parses an HTTP response after an HTTP request is sent | 2.92733 | 2.896923 | 1.010496 |
self.request_object = http_request
self.build_socket()
self.build_request()
try:
self.sock.send(self.request)
except socket.error as err:
raise errors.TestError(
'We were unable to send the request to the socket',
{
'msg': err,
'function': 'http.HttpUA.send_request'
})
finally:
self.get_response() | def send_request(self, http_request) | Send a request and get response | 6.003728 | 5.783318 | 1.038111 |
try:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(self.SOCKET_TIMEOUT)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Check if TLS
if self.request_object.protocol == 'https':
self.sock = ssl.wrap_socket(self.sock, ciphers=self.CIPHERS)
self.sock.connect(
(self.request_object.dest_addr, self.request_object.port))
except socket.error as msg:
raise errors.TestError(
'Failed to connect to server',
{
'host': self.request_object.dest_addr,
'port': self.request_object.port,
'proto': self.request_object.protocol,
'message': msg,
'function': 'http.HttpUA.build_socket'
}) | def build_socket(self) | Generate either an HTTPS or HTTP socket | 2.522519 | 2.430244 | 1.037969 |
return_cookies = []
origin_domain = self.request_object.dest_addr
for cookie in self.cookiejar:
for cookie_morsals in cookie[0].values():
cover_domain = cookie_morsals['domain']
if cover_domain == '':
if origin_domain == cookie[1]:
return_cookies.append(cookie[0])
else:
# Domain match algorithm
bvalue = cover_domain.lower()
hdn = origin_domain.lower()
nend = hdn.find(bvalue)
if nend is not False:
return_cookies.append(cookie[0])
return return_cookies | def find_cookie(self) | Find a list of all cookies for a given domain | 5.855982 | 5.604659 | 1.044842 |
self.sock.setblocking(0)
our_data = []
# Beginning time
begin = time.time()
while True:
# If we have data then if we're passed the timeout break
if our_data and time.time() - begin > self.HTTP_TIMEOUT:
break
# If we're dataless wait just a bit
elif time.time() - begin > self.HTTP_TIMEOUT * 2:
break
# Recv data
try:
data = self.sock.recv(self.RECEIVE_BYTES)
if data:
our_data.append(data)
begin = time.time()
else:
# Sleep for sometime to indicate a gap
time.sleep(self.HTTP_TIMEOUT)
except socket.error as err:
# Check if we got a timeout
if err.errno == errno.EAGAIN:
pass
# SSL will return SSLWantRead instead of EAGAIN
elif sys.platform == 'win32' and \
err.errno == errno.WSAEWOULDBLOCK:
pass
elif (self.request_object.protocol == 'https' and
err[0] == ssl.SSL_ERROR_WANT_READ):
continue
# If we didn't it's an error
else:
raise errors.TestError(
'Failed to connect to server',
{
'host': self.request_object.dest_addr,
'port': self.request_object.port,
'proto': self.request_object.protocol,
'message': err,
'function': 'http.HttpUA.get_response'
})
if ''.join(our_data) == '':
raise errors.TestError(
'No response from server. Request likely timed out.',
{
'host': self.request_object.dest_addr,
'port': self.request_object.port,
'proto': self.request_object.protocol,
'msg': 'Please send the request and check Wireshark',
'function': 'http.HttpUA.get_response'
})
self.response_object = HttpResponse(''.join(our_data), self)
try:
self.sock.shutdown(1)
self.sock.close()
except socket.error as err:
raise errors.TestError(
'We were unable to close the socket as expected.',
{
'msg': err,
'function': 'http.HttpUA.get_response'
}) | def get_response(self) | Get the response from the socket | 3.340277 | 3.26891 | 1.021832 |
return re.compile(self.output_dict[key]) if \
key in self.output_dict else None | def process_regex(self, key) | Extract the value of key from dictionary if available
and process it as a python regex | 6.386167 | 5.243279 | 1.217972 |
table_name = 'ftw'
col1 = 'rule_id'
col1_t = 'INTEGER'
col2 = 'test_id'
col2_t = 'STRING'
col3 = 'time_start'
col3_t = 'TEXT'
col4 = 'time_end'
col4_t = 'TEXT'
col5 = 'response_blob'
col5_t = 'TEXT'
col6 = 'status_code'
col6_t = 'INTEGER'
col7 = 'stage'
col7_t = 'INTEGER'
conn = sqlite3.connect(sqlite_file)
cur = conn.cursor()
q = 'CREATE TABLE {tn}({col1} {col1_t},{col2} {col2_t},{col3} {col3_t},{col4} {col4_t},{col5} {col5_t},{col6} {col6_t},{col7} {col7_t})'.format(
tn=table_name,
col1=col1, col1_t=col1_t,
col2=col2, col2_t=col2_t,
col3=col3, col3_t=col3_t,
col4=col4, col4_t=col4_t,
col5=col5, col5_t=col5_t,
col6=col6, col6_t=col6_t,
col7=col7, col7_t=col7_t)
cur.execute(q)
conn.commit()
conn.close() | def instantiate_database(sqlite_file='ftwj.sqlite') | Create journal database for FTW runs | 1.594275 | 1.597182 | 0.99818 |
if os.path.isdir(ruledir) and recurse:
yaml_files = [y for x in os.walk(ruledir) for y in glob(os.path.join(x[0], '*.yaml'))]
elif os.path.isdir(ruledir) and not recurse:
yaml_files = get_files(ruledir, 'yaml')
elif os.path.isfile(ruledir):
yaml_files = [ruledir]
extracted_files = extract_yaml(yaml_files)
rulesets = []
for extracted_yaml in extracted_files:
rulesets.append(ruleset.Ruleset(extracted_yaml))
return rulesets | def get_rulesets(ruledir, recurse) | List of ruleset objects extracted from the yaml directory | 2.032198 | 1.934345 | 1.050587 |
loaded_yaml = []
for yaml_file in yaml_files:
try:
with open(yaml_file, 'r') as fd:
loaded_yaml.append(yaml.safe_load(fd))
except IOError as e:
print('Error reading file', yaml_file)
raise e
except yaml.YAMLError as e:
print('Error parsing file', yaml_file)
raise e
except Exception as e:
print('General error')
raise e
return loaded_yaml | def extract_yaml(yaml_files) | Take a list of yaml_files and load them to return back
to the testing program | 1.919222 | 1.979385 | 0.969605 |
instance, _ = cls.parse(raw_bytes, offset=0)
return instance | def deserialize(cls, raw_bytes) | Deserializes the given raw bytes into an instance.
Since this is a subclass of ``Part`` but a top-level one (i.e. no other
subclass of ``Part`` would have a ``Response`` as a part) this merely
has to parse the raw bytes and discard the resulting offset. | 10.194974 | 8.280951 | 1.231136 |
if not parts:
parts = self.parts
fmt = []
data = []
for name, part_class in parts:
if issubclass(part_class, Primitive):
part = part_class(getattr(self, name, None))
else:
part = getattr(self, name, None)
part_format, part_data = part.render()
fmt.extend(part_format)
data.extend(part_data)
return "".join(fmt), data | def render(self, parts=None) | Returns a two-element tuple with the ``struct`` format and values.
Iterates over the applicable sub-parts and calls `render()` on them,
accumulating the format string and values.
Optionally takes a subset of parts to render, default behavior is to
render all sub-parts belonging to the class. | 2.809573 | 2.564611 | 1.095516 |
values = {}
for name, part in cls.parts:
value, new_offset = part.parse(buff, offset)
values[name] = value
offset = new_offset
return cls(**values), offset | def parse(cls, buff, offset) | Given a buffer and offset, returns the parsed value and new offset.
Calls `parse()` on the given buffer for each sub-part in order and
creates a new instance with the results. | 3.556154 | 2.844142 | 1.250344 |
while not self.closing:
try:
xid, zxid, response = await self.read_response()
except (ConnectionAbortedError, asyncio.CancelledError):
return
except Exception as e:
log.exception("Error reading response.")
self.abort()
return
payload_log.debug("[RECV] (xid: %s) %s", xid, response)
if xid == protocol.WATCH_XID:
self.watch_handler(response)
continue
elif xid in protocol.SPECIAL_XIDS:
f = self.pending_specials[xid].pop()
else:
f = self.pending.pop(xid)
if isinstance(response, Exception):
f.set_exception(response)
elif not f.cancelled():
f.set_result((zxid, response)) | async def read_loop(self) | Infinite loop that reads messages off of the socket while not closed.
When a message is received its corresponding pending Future is set
to have the message as its result.
This is never used directly and is fired as a separate callback on the
I/O loop via the `connect()` method. | 4.142767 | 4.097165 | 1.01113 |
log.warning("Aborting connection to %s:%s", self.host, self.port)
def abort_pending(f):
exc_info = sys.exc_info()
# TODO
log.debug('Abort pending: {}'.format(f))
if False and any(exc_info):
f.set_exc_info(exc_info)
else:
f.set_exception(exception(self.host, self.port))
for pending in self.drain_all_pending():
if pending.done() or pending.cancelled():
continue
abort_pending(pending) | def abort(self, exception=exc.ConnectError) | Aborts a connection and puts all pending futures into an error state.
If ``sys.exc_info()`` is set (i.e. this is being called in an exception
handler) then pending futures will have that exc info set. Otherwise
the given ``exception`` parameter is used (defaults to
``ConnectError``). | 3.994862 | 3.610269 | 1.106527 |
primitive_struct = struct.Struct("!" + cls.fmt)
value = primitive_struct.unpack_from(buff, offset)[0]
offset += primitive_struct.size
return value, offset | def parse(cls, buff, offset) | Given a buffer and offset, returns the parsed value and new offset.
Uses the ``format`` class attribute to unpack the data from the buffer
and determine the used up number of bytes. | 4.010711 | 3.785847 | 1.059396 |
size_format = self.size_primitive.fmt
if self.value is None:
return size_format, [-1]
value = self.render_value(self.value)
size = len(value)
fmt = "%s%ds" % (size_format, size)
return fmt, [size, value] | def render(self) | Returns the ``struct`` format and list of the size and value.
The format is derived from the size primitive and the length of the
resulting encoded value (e.g. the format for a string of 'foo' ends
up as 'h3s'.
.. note ::
The value is expected to be string-able (wrapped in ``str()``) and is
then encoded as UTF-8. | 5.95446 | 3.629657 | 1.640502 |
size, offset = cls.size_primitive.parse(buff, offset)
if size == -1:
return None, offset
var_struct = struct.Struct("!%ds" % size)
value = var_struct.unpack_from(buff, offset)[0]
value = cls.parse_value(value)
offset += var_struct.size
return value, offset | def parse(cls, buff, offset) | Given a buffer and offset, returns the parsed value and new offset.
Parses the ``size_primitive`` first to determine how many more bytes to
consume to extract the value. | 3.14444 | 2.588947 | 1.214564 |
copy = type(
"VectorOf%s" % part_class.__name__,
cls.__bases__, dict(cls.__dict__)
)
copy.item_class = part_class
return copy | def of(cls, part_class) | Creates a new class with the ``item_class`` attribute properly set. | 5.6963 | 4.329584 | 1.315669 |
value = self.value
if value is None:
value = []
fmt = [Int.fmt]
data = [len(value)]
for item_value in value:
if issubclass(self.item_class, Primitive):
item = self.item_class(item_value)
else:
item = item_value
item_format, item_data = item.render()
fmt.extend(item_format)
data.extend(item_data)
return "".join(fmt), data | def render(self) | Creates a composite ``struct`` format and the data to render with it.
The format and data are prefixed with a 32-bit integer denoting the
number of elements, after which each of the items in the array value
are ``render()``-ed and added to the format and data as well. | 3.449346 | 3.080627 | 1.119689 |
count, offset = Int.parse(buff, offset)
values = []
for _ in range(count):
value, new_offset = cls.item_class.parse(buff, offset)
values.append(value)
offset = new_offset
return values, offset | def parse(cls, buff, offset) | Parses a raw buffer at offset and returns the resulting array value.
Starts off by `parse()`-ing the 32-bit element count, followed by
parsing items out of the buffer "count" times. | 3.197858 | 3.253551 | 0.982882 |
allocation = collections.defaultdict(set)
for member, item in zip(itertools.cycle(members), items):
allocation[member].add(item)
return allocation | def round_robin(members, items) | Default allocator with a round robin approach.
In this algorithm, each member of the group is cycled over and given an
item until there are no items left. This assumes roughly equal capacity
for each member and aims for even distribution of item counts. | 3.46862 | 3.950937 | 0.877923 |
if start <= stop:
stop, step = stop + 1, abs(step)
else:
stop, step = stop - 1, -abs(step)
if maxSize >= 0:
size = lenRange(start, stop, step)
if size > maxSize:
raise exceptions.MaxSizeException(
"Size %d > %s (MAX_FRAME_SIZE)" % (size, maxSize))
# because an xrange is an odd object all its own, we wrap it in a
# generator expression to get a proper Generator
return (f for f in xrange(start, stop, step)) | def xfrange(start, stop, step=1, maxSize=-1) | Returns a generator that yields the frames from start to stop, inclusive.
In other words it adds or subtracts a frame, as necessary, to return the
stop value as well, if the stepped range would touch that value.
Args:
start (int):
stop (int):
step (int): Note that the sign will be ignored
maxSize (int):
Returns:
generator:
Raises:
:class:`fileseq.exceptions.MaxSizeException`: if size is exceeded | 5.828786 | 5.54236 | 1.051679 |
_add = seen.add
# return a generator of the unique items and the set of the seen items
# the seen set will mutate when the generator is iterated over
return (i for i in chain(*iterables) if i not in seen and not _add(i)) | def unique(seen, *iterables) | Get the unique items in iterables while preserving order. Note that this
mutates the seen set provided only when the returned generator is used.
Args:
seen (set): either an empty set, or the set of things already seen
*iterables: one or more iterable lists to chain together
Returns:
generator: | 5.63679 | 5.634839 | 1.000346 |
fs = self.__class__.__new__(self.__class__)
fs.__dict__ = self.__dict__.copy()
fs._frameSet = None
if self._frameSet is not None:
fs._frameSet = self._frameSet.copy()
return fs | def copy(self) | Create a deep copy of this sequence
Returns:
:obj:`.FileSequence`: | 3.141907 | 3.395976 | 0.925185 |
# Potentially expensive if inverted range is large
# and user never asked for it in template
inverted = (self.invertedFrameRange() or "") if "{inverted}" in template else ""
return template.format(
basename=self.basename(),
extension=self.extension(), start=self.start(),
end=self.end(), length=len(self),
padding=self.padding(),
range=self.frameRange() or "",
inverted=inverted,
dirname=self.dirname()) | def format(self, template="{basename}{range}{padding}{extension}") | Return the file sequence as a formatted string according to
the given template.
Utilizes the python string format syntax. Available keys include:
* basename - the basename of the sequence.
* extension - the file extension of the sequence.
* start - the start frame.
* end - the end frame.
* length - the length of the frame range.
* padding - the detecting amount of padding.
* inverted - the inverted frame range. (returns "" if none)
* dirname - the directory name.
If asking for the inverted range value, and the new inverted range
exceeded :const:`fileseq.constants.MAX_FRAME_SIZE`, a ``MaxSizeException``
will be raised.
Args:
template (str):
Returns:
str:
Raises:
:class:`fileseq.exceptions.MaxSizeException`: If frame size exceeds
:const:`fileseq.constants.MAX_FRAME_SIZE` | 7.227824 | 5.619217 | 1.286269 |
result = []
for frange in self.frameRange().split(","):
result.append(FileSequence(''.join(
(self._dir, self._base, frange, self._pad, self._ext))))
return result | def split(self) | Split the :class:`FileSequence` into contiguous pieces and return them
as a list of :class:`FileSequence` instances.
Returns:
list[:class:`FileSequence`]: | 10.857419 | 8.931737 | 1.2156 |
# Make sure the dirname always ends in
# a path separator character
sep = utils._getPathSep(dirname)
if not dirname.endswith(sep):
dirname += sep
self._dir = utils.asString(dirname) | def setDirname(self, dirname) | Set a new directory name for the sequence.
Args:
dirname (str): the new directory name | 9.571766 | 10.613685 | 0.901833 |
self._pad = padding
self._zfill = self.__class__.getPaddingNum(self._pad) | def setPadding(self, padding) | Set new padding characters for the sequence.
i.e. "#" or "@@@" or '%04d', or an empty string to disable range formatting.
Args:
padding (str): sequence padding to set | 12.27629 | 15.042529 | 0.816105 |
if ext[0] != ".":
ext = "." + ext
self._ext = utils.asString(ext) | def setExtension(self, ext) | Set a new file extension for the sequence.
Note:
A leading period will be added if none is provided.
Args:
ext (str): the new file extension | 6.951852 | 9.331054 | 0.745023 |
import warnings
msg = "the setExtention method is deprecated, please use setExtension"
warnings.warn(msg)
self.setExtension(ext) | def setExtention(self, ext) | Deprecated: use :meth:`setExtension`.
Args:
ext (str): | 4.578524 | 4.337776 | 1.0555 |
try:
zframe = str(int(frame)).zfill(self._zfill)
except ValueError:
zframe = frame
# There may have been no placeholder for frame IDs in
# the sequence, in which case we don't want to insert
# a frame ID
if self._zfill == 0:
zframe = ""
return "".join((self._dir, self._base, zframe, self._ext)) | def frame(self, frame) | Return a path go the given frame in the sequence. Integer or string
digits are treated as a frame number and padding is applied, all other
values are passed though.
Examples:
>>> seq.frame(1)
/foo/bar.0001.exr
>>> seq.frame("#")
/foo/bar.#.exr
Args:
frame (int or str): the desired frame number or a char to pass
through (ie. #)
Returns:
str: | 6.406189 | 6.613554 | 0.968646 |
seqs = {}
_check = DISK_RE.match
for match in ifilter(None, imap(_check, imap(utils.asString, paths))):
dirname, basename, frame, ext = match.groups()
if not basename and not ext:
continue
key = (dirname, basename, ext)
seqs.setdefault(key, set())
if frame:
seqs[key].add(frame)
for (dirname, basename, ext), frames in seqs.iteritems():
# build the FileSequence behind the scenes, rather than dupe work
seq = FileSequence.__new__(FileSequence)
seq._dir = dirname or ''
seq._base = basename or ''
seq._ext = ext or ''
if frames:
seq._frameSet = FrameSet(set(imap(int, frames))) if frames else None
seq._pad = FileSequence.getPaddingChars(min(imap(len, frames)))
else:
seq._frameSet = None
seq._pad = ''
seq.__init__(str(seq))
yield seq | def yield_sequences_in_list(paths) | Yield the discrete sequences within paths. This does not try to
determine if the files actually exist on disk, it assumes you already
know that.
Args:
paths (list[str]): a list of paths
Yields:
:obj:`FileSequence`: | 4.980913 | 5.14261 | 0.968557 |
# reserve some functions we're going to need quick access to
_not_hidden = lambda f: not f.startswith('.')
_match_pattern = None
_filter_padding = None
_join = os.path.join
seq = None
dirpath = pattern
# Support the pattern defining a filter for the files
# in the existing directory
if not os.path.isdir(pattern):
dirpath, filepat = os.path.split(pattern)
if not os.path.isdir(dirpath):
return []
# Start building a regex for filtering files
seq = cls(filepat)
patt = seq.basename().replace('.', r'\.')
if seq.padding():
patt += '\d+'
if seq.extension():
patt += seq.extension()
# Convert braces groups into regex capture groups
view = bytearray(patt)
matches = re.finditer(r'{(.*?)(?:,(.*?))*}', patt)
for match in reversed(list(matches)):
i, j = match.span()
view[i:j] = '(%s)' % '|'.join([m.strip() for m in match.groups()])
view = view.replace('*', '.*')
view = view.replace('?', '.')
view += '$'
try:
_match_pattern = re.compile(str(view)).match
except re.error:
msg = 'Invalid file pattern: {}'.format(filepat)
raise FileSeqException(msg)
if seq.padding() and strictPadding:
_filter_padding = functools.partial(cls._filterByPaddingNum, num=seq.zfill())
# Get just the immediate files under the dir.
# Avoids testing the os.listdir() for files as
# a second step.
ret = next(os.walk(dirpath), None)
files = ret[-1] if ret else []
# collapse some generators to get us the files that match our regex
if not include_hidden:
files = ifilter(_not_hidden, files)
# Filter by files that match the provided file pattern
if _match_pattern:
files = ifilter(_match_pattern, files)
# Filter by files that match the frame padding in the file pattern
if _filter_padding:
# returns a generator
files = _filter_padding(files)
# Ensure our dirpath ends with a path separator, so
# that we can control which sep is used during the
# os.path.join
sep = utils._getPathSep(dirpath)
if not dirpath.endswith(sep):
dirpath += sep
files = (_join(dirpath, f) for f in files)
files = list(files)
seqs = list(FileSequence.yield_sequences_in_list(files))
if _filter_padding and seq:
pad = cls.conformPadding(seq.padding())
# strict padding should preserve the original padding
# characters in the found sequences.
for s in seqs:
s.setPadding(pad)
return seqs | def findSequencesOnDisk(cls, pattern, include_hidden=False, strictPadding=False) | Yield the sequences found in the given directory.
Examples:
>>> findSequencesOnDisk('/path/to/files')
The `pattern` can also specify glob-like shell wildcards including the following:
* ``?`` - 1 wildcard character
* ``*`` - 1 or more wildcard character
* ``{foo,bar}`` - either 'foo' or 'bar'
Exact frame ranges are not considered, and padding characters are converted to
wildcards (``#`` or ``@``)
Examples:
>>> findSequencesOnDisk('/path/to/files/image_stereo_{left,right}.#.jpg')
>>> findSequencesOnDisk('/path/to/files/imag?_*_{left,right}.@@@.jpg', strictPadding=True)
Args:
pattern (str): directory to scan, or pattern to filter in directory
include_hidden (bool): if true, show .hidden files as well
strictPadding (bool): if True, ignore files with padding length different from pattern
Returns:
list: | 5.232605 | 5.21667 | 1.003055 |
seq = cls(pattern)
if seq.frameRange() == '' and seq.padding() == '':
if os.path.isfile(pattern):
return seq
patt = seq.format('{dirname}{basename}*{extension}')
ext = seq.extension()
basename = seq.basename()
pad = seq.padding()
globbed = iglob(patt)
if pad and strictPadding:
globbed = cls._filterByPaddingNum(globbed, seq.zfill())
pad = cls.conformPadding(pad)
matches = cls.yield_sequences_in_list(globbed)
for match in matches:
if match.basename() == basename and match.extension() == ext:
if pad and strictPadding:
match.setPadding(pad)
return match
msg = 'no sequence found on disk matching {0}'
raise FileSeqException(msg.format(pattern)) | def findSequenceOnDisk(cls, pattern, strictPadding=False) | Search for a specific sequence on disk.
The padding characters used in the `pattern` are used to filter the
frame values of the files on disk (if `strictPadding` is True).
Examples:
Find sequence matching basename and extension, and a wildcard for
any frame.
returns bar.1.exr bar.10.exr, bar.100.exr, bar.1000.exr, inclusive
>>> findSequenceOnDisk("seq/bar@@@@.exr")
Find exactly 4-padded sequence, i.e. seq/bar1-100#.exr
returns only frames bar1000.exr through bar9999.exr
>>> findSequenceOnDisk("seq/bar#.exr", strictPadding=True)
Args:
pattern (str): the sequence pattern being searched for
strictPadding (bool): if True, ignore files with padding length different from `pattern`
Returns:
str:
Raises:
:class:`.FileSeqException`: if no sequence is found on disk | 5.114039 | 5.28493 | 0.967664 |
_check = DISK_RE.match
for item in iterable:
# Add a filter for paths that don't match the frame
# padding of a given number
matches = _check(item)
if not matches:
if num <= 0:
# Not a sequence pattern, but we were asked
# to match on a zero padding
yield item
continue
frame = matches.group(3) or ''
if not frame:
if num <= 0:
# No frame value was parsed, but we were asked
# to match on a zero padding
yield item
continue
# We have a frame number
if frame[0] == '0' or frame[:2] == '-0':
if len(frame) == num:
# A frame leading with '0' is explicitly
# padded and can only be a match if its exactly
# the target padding number
yield item
continue
if len(frame) >= num:
# A frame that does not lead with '0' can match
# a padding width >= to the target padding number
yield item
continue | def _filterByPaddingNum(cls, iterable, num) | Yield only path elements from iterable which have a frame
padding that matches the given target padding number
Args:
iterable (collections.Iterable):
num (int):
Yields:
str: | 6.317551 | 6.309972 | 1.001201 |
match = PRINTF_SYNTAX_PADDING_RE.match(chars)
if match:
return int(match.group(1))
try:
return sum([PAD_MAP[char] for char in chars])
except KeyError:
msg = "Detected an unsupported padding character: \"{}\"."
msg += " Supported padding characters: {} or printf syntax padding"
msg += " %<int>d"
raise ValueError(msg.format(char, str(PAD_MAP.keys()))) | def getPaddingNum(chars) | Given a supported group of padding characters, return the amount of padding.
Args:
chars (str): a supported group of padding characters
Returns:
int:
Raises:
ValueError: if unsupported padding character is detected | 6.081882 | 6.167263 | 0.986156 |
pad = chars
if pad and pad[0] not in PAD_MAP:
pad = cls.getPaddingChars(cls.getPaddingNum(pad))
return pad | def conformPadding(cls, chars) | Ensure alternate input padding formats are conformed
to formats defined in PAD_MAP
If chars is already a format defined in PAD_MAP, then
it is returned unmodified.
Example::
'#' -> '#'
'@@@@' -> '@@@@'
'%04d' -> '#'
Args:
chars (str): input padding chars
Returns:
str: conformed padding chars
Raises:
ValueError: If chars contains invalid padding characters | 6.817278 | 9.413864 | 0.724174 |
return FrameSet(sorted(frames) if sort else frames) | def from_iterable(cls, frames, sort=False) | Build a :class:`FrameSet` from an iterable of frames.
Args:
frames (collections.Iterable): an iterable object containing frames as integers
sort (bool): True to sort frames before creation, default is False
Returns:
:class:`FrameSet`: | 8.256234 | 15.514573 | 0.53216 |
if isinstance(other, FrameSet):
return other
try:
return FrameSet(other)
except Exception:
return NotImplemented | def _cast_to_frameset(cls, other) | Private method to simplify comparison operations.
Args:
other (:class:`FrameSet` or set or frozenset or or iterable): item to be compared
Returns:
:class:`FrameSet`
Raises:
:class:`NotImplemented`: if a comparison is impossible | 3.742659 | 4.62711 | 0.808855 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.