| file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-12.1k) | suffix (large_string, lengths 0-12k) | middle (large_string, lengths 0-7.51k) | fim_type (large_string, 4 classes) |
|---|---|---|---|---|
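Each row pairs a code `prefix` and `suffix` with the held-out `middle`, and `fim_type` records how the split was made (the four classes seen in this dump are `random_line_split`, `identifier_body`, `identifier_name` and `conditional_block`). Below is a minimal sketch of how a row could be assembled into a fill-in-the-middle training prompt; the sentinel token names are an assumption borrowed from common FIM setups, and the tiny example row is made up for illustration.

```python
def build_fim_prompt(row):
    """Concatenate a dataset row into a PSM-ordered FIM prompt.

    The <fim_prefix>/<fim_suffix>/<fim_middle> sentinels are assumed placeholders -
    substitute whatever special tokens your tokenizer defines.
    """
    return (
        "<fim_prefix>" + row["prefix"]
        + "<fim_suffix>" + row["suffix"]
        + "<fim_middle>" + row["middle"]
    )

# Made-up example row in the shape of this dataset.
example = {
    "file_name": "bot.js",
    "prefix": "async promptStep(step) {\n    const activity = ",
    "suffix": ";\n    return await step.beginDialog(LOGIN_PROMPT);\n}",
    "middle": "step.context.activity",
    "fim_type": "random_line_split",
}

print(build_fim_prompt(example))
```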
bot.js

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// bot.js is your main bot dialog entry point for handling activity types
// Import required Bot Builder
const { ActionTypes, ActivityTypes, CardFactory } = require('botbuilder');
const { LuisRecognizer } = require('botbuilder-ai');
const { DialogSet, WaterfallDialog } = require('botbuilder-dialogs');
const { OAuthHelpers, LOGIN_PROMPT } = require('./oauth-helpers');
const CONNECTION_SETTING_NAME = '<MS Graph API Connection Name>';
/**
* Demonstrates the following concepts:
* Displaying a Welcome Card, using Adaptive Card technology
* Use LUIS to model Greetings, Help, and Cancel interactions
* Use a Waterfall dialog to model multi-turn conversation flow
* Use custom prompts to validate user input
* Store conversation and user state
* Handle conversation interruptions
*/
let luisResult = null;
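// NOTE: this module-level variable is never read; onTurn() stores the recognizer result on the instance as this.luisResult instead.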
class BasicBot {
/**
* Constructs the four pieces necessary for this bot to operate:
* 1. StatePropertyAccessor for conversation state
* 2. StatePropertyAccessor for user state
* 3. LUIS client
* 4. DialogSet to handle our GreetingDialog
*
* @param {ConversationState} conversationState Conversation state accessor
* @param {LuisApplication} application LUIS application definition
* @param {LuisPredictionOptions} luisPredictionOptions LUIS prediction options
* @param {boolean} includeApiResults Whether to include the raw LUIS API results
*/
constructor(conversationState, application, luisPredictionOptions, includeApiResults) {
this.luisRecognizer = new LuisRecognizer(application,luisPredictionOptions, true);
this.conversationState = conversationState;
// DialogState property accessor. Used to persist DialogState when using the DialogSet.
this.dialogState = conversationState.createProperty('dialogState');
this.commandState = conversationState.createProperty('commandState');
// Instructions for the user with information about commands that this bot may handle.
this.helpMessage = `You can type "send <recipient_email>" to send an email, "recent" to view recent unread mail,` +
` "me" to see information about you, or "help" to view the commands` +
` again. For anything else, LUIS reports the top-scoring intent and its score.`;
// Create a DialogSet that contains the OAuthPrompt.
this.dialogs = new DialogSet(this.dialogState);
// Add an OAuthPrompt with the connection name as specified on the Bot's settings blade in Azure.
this.dialogs.add(OAuthHelpers.prompt(CONNECTION_SETTING_NAME));
this._graphDialogId = 'graphDialog';
// Logs in the user and, if login succeeds, runs the subsequent dialog steps.
this.dialogs.add(new WaterfallDialog(this._graphDialogId, [
this.promptStep.bind(this),
this.processStep.bind(this)
]));
}
/**
* Driver code that does one of the following:
* 1. Display a welcome card upon receiving ConversationUpdate activity
* 2. Use LUIS to recognize intents for incoming user message
* 3. Start a greeting dialog
* 4. Optionally handle Cancel or Help interruptions
*
* @param {Context} turnContext turn context from the adapter
*/
async onTurn(turnContext) {
const dc = await this.dialogs.createContext(turnContext);
const results = await this.luisRecognizer.recognize(turnContext);
switch (turnContext._activity.type) {
case ActivityTypes.Message:
this.luisResult = results;
await this.processInput(dc);
break;
case ActivityTypes.Event:
case ActivityTypes.Invoke:
if (turnContext._activity.type === ActivityTypes.Invoke && turnContext._activity.channelId !== 'msteams') {
throw new Error('The Invoke type is only valid on the MS Teams channel.');
};
await dc.continueDialog();
if (!turnContext.responded) {
await dc.beginDialog(this._graphDialogId);
};
break;
case ActivityTypes.ConversationUpdate:
await this.sendWelcomeMessage(turnContext);
break;
default:
await turnContext.sendActivity(`[${ turnContext._activity.type }]-type activity detected.`);
}
await this.conversationState.saveChanges(turnContext);
}
async sendWelcomeMessage(turnContext) {
const activity = turnContext.activity;
if (activity && activity.membersAdded) {
const heroCard = CardFactory.heroCard(
'Welcome to LUIS with MSGraph API Authentication BOT!',
CardFactory.images(['https://botframeworksamples.blob.core.windows.net/samples/aadlogo.png']),
CardFactory.actions([
{
type: ActionTypes.ImBack,
title: 'Log me in',
value: 'login'
},
{
type: ActionTypes.ImBack,
title: 'Me',
value: 'me'
},
{
type: ActionTypes.ImBack,
title: 'Recent',
value: 'recent'
},
{
type: ActionTypes.ImBack,
title: 'View Token',
value: 'viewToken'
},
{
type: ActionTypes.ImBack,
title: 'Help',
value: 'help'
},
{
type: ActionTypes.ImBack,
title: 'Signout',
value: 'signout'
}
])
);
for (const idx in activity.membersAdded) {
if (activity.membersAdded[idx].id !== activity.recipient.id) {
await turnContext.sendActivity({ attachments: [heroCard] });
}
}
}
}
async processInput(dc, luisResult) {
//console.log(dc);
switch (dc.context.activity.text.toLowerCase()) {
case 'signout':
case 'logout':
case 'signoff':
case 'logoff':
// The bot adapter encapsulates the authentication processes and sends
// activities to and from the Bot Connector Service.
const botAdapter = dc.context.adapter;
await botAdapter.signOutUser(dc.context, CONNECTION_SETTING_NAME);
// Let the user know they are signed out.
await dc.context.sendActivity('You are now signed out.');
break;
case 'help':
await dc.context.sendActivity(this.helpMessage);
break;
default:
// The user has input a command that has not been handled yet,
// begin the waterfall dialog to handle the input.
await dc.continueDialog();
if (!dc.context.responded) {
await dc.beginDialog(this._graphDialogId);
}
}
};
async promptStep(step) {
const activity = step.context.activity;
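// Skip storing the text when it looks like a 6-digit code - that is most likely the OAuth 'magic code' returned during sign-in rather than a user command.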
if (activity.type === ActivityTypes.Message && !(/\d{6}/).test(activity.text)) {
await this.commandState.set(step.context, activity.text);
await this.conversationState.saveChanges(step.context);
}
return await step.beginDialog(LOGIN_PROMPT);
}
async processStep(step) {
//console.log(step);
// We do not need to store the token in the bot. When we need the token we can
// send another prompt. If the token is valid the user will not need to log back in.
// The token will be available in the Result property of the task.
const tokenResponse = step.result;
// If the user is authenticated the bot can use the token to make API calls.
if (tokenResponse !== undefined) {
let parts = await this.commandState.get(step.context);
if (!parts) {
parts = step.context.activity.text;
}
const command = parts.split(' ')[0].toLowerCase();
console.log(command);
if(command === 'login' || command === 'signin'){
await step.context.sendActivity(`You are already logged in!`);
}
else if (command === 'me') {
await OAuthHelpers.listMe(step.context, tokenResponse);
} else if (command === 'send') {
await OAuthHelpers.sendMail(step.context, tokenResponse, parts.split(' ')[1].toLowerCase());
} else if (command === 'recent') {
await OAuthHelpers.listRecentMail(step.context, tokenResponse);
} else if(command.toLowerCase() === 'viewtoken'){
await step.context.sendActivity(`Your token is: ${ tokenResponse.token }`);
}else{
console.log(this.luisResult);
const topIntent = this.luisResult.luisResult.topScoringIntent;
if (topIntent.intent !== 'None') {
await step.context.sendActivity(`LUIS Top Scoring Intent: ${ topIntent.intent }, Score: ${ topIntent.score }`);
}else{
await step.context.sendActivity(`Please try something else!`);
// If the top scoring intent was "None" tell the user no valid intents were found and provide help.
// await step.context.sendActivity(`No LUIS intents were found.
// \nThis sample is about identifying two user intents:
// \n - 'Calendar.Add'
// \n - 'Calendar.Find'
// \nTry typing 'Add Event' or 'Show me tomorrow'.`);
}
}
} else {
// Ask the user to try logging in later as they are not logged in.
await step.context.sendActivity(`We couldn't log you in. Please try again later.`);
}
return await step.endDialog();
};
};
exports.BasicBot = BasicBot;
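For context, a minimal sketch of how BasicBot is typically hosted. The restify/BotFrameworkAdapter wiring and the environment variable names below are assumptions based on common Bot Framework v4 samples, not part of this file.

```javascript
// index.js - hypothetical host for BasicBot (sketch only).
const restify = require('restify');
const { BotFrameworkAdapter, ConversationState, MemoryStorage } = require('botbuilder');
const { BasicBot } = require('./bot');

const adapter = new BotFrameworkAdapter({
    appId: process.env.MicrosoftAppId,
    appPassword: process.env.MicrosoftAppPassword
});

// Conversation state backed by in-memory storage (swap for durable storage in production).
const conversationState = new ConversationState(new MemoryStorage());

// Placeholder LUIS settings - replace with real application values.
const luisApplication = {
    applicationId: process.env.LuisAppId,
    endpointKey: process.env.LuisAPIKey,
    endpoint: `https://${ process.env.LuisAPIHostName }`
};
const luisPredictionOptions = { includeAllIntents: true, log: true, staging: false };

const bot = new BasicBot(conversationState, luisApplication, luisPredictionOptions, true);

const server = restify.createServer();
server.listen(process.env.port || 3978);
server.post('/api/messages', (req, res) => {
    adapter.processActivity(req, res, async (turnContext) => {
        await bot.onTurn(turnContext);
    });
});
```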
parser_manager.py

FocusStack
from CrystalMatch.dls_imagematch import logconfig
from CrystalMatch.dls_imagematch.service import readable_config_dir
from CrystalMatch.dls_imagematch.version import VersionHandler
from CrystalMatch.dls_imagematch.service.readable_config_dir import ReadableConfigDir
from CrystalMatch.dls_util.shape import Point
from CrystalMatch.dls_util.imaging import Image
class ParserManager:
LOG_DIR_PERMISSION = 0o777
LOG_DIR_NAME = 'logs'
LOG_FILE_NAME = 'log'
FOCUSED_IMAGE_NAME = 'processed.tif'
DEFAULT_SCRIPT_PATH = '.CrystalMatch'
def __init__(self):
self.parser = None
self.images_to_stack = None
self._script_path = None
def build_parser(self):
"""Return an argument parser for the Crystal Matching service.
:return: Argument parser.
"""
parser = argparse.ArgumentParser(
description="Run Crystal Matching algorithm attempting to translate co-ordinates "
"on an input image to the coordinate-space of an output image while "
"accounting for possible movement of crystals in the sample.")
if sys.version_info[0] < 3:
parser.add_argument('Formulatrix_image',
metavar="Formulatrix_image_path",
type=file,
help='Image file from the Formulatrix - selected_point should correspond to co-ordinates on '
'this image.')
else:
parser.add_argument('Formulatrix_image',
metavar="Formulatrix_image_path",
type=argparse.FileType('r'),
help='Image file from the Formulatrix - selected_point should correspond to co-ordinates on '
'this image.')
parser.add_argument('beamline_stack_path',
metavar="beamline_stack_path",
help="A path pointing at a directory which stores images to be stacked or a path to a stacked image.")
parser.add_argument('selected_points',
metavar="x,y",
nargs='*',
help="Comma-separated co-ordinates of selected points to be translated from the marked image "
"to the target image.")
parser.add_argument('-o','--output',
metavar="focused_image_path",
help="Specify directory for the stacked image. "
"A file called 'processed.tif' will be created in the directory."
"'processed.tif' will be created in log directory if this is not set.")
parser.add_argument('--config',
metavar="path",
action=ReadableConfigDir,
default=join(self.get_script_path(), readable_config_dir.CONFIG_DIR_NAME),
help="Sets the configuration directory.")
parser.add_argument('--scale',
metavar="scale",
help="The scale between the Formulatrix and beamline image given as the resolution of each "
"image separated by a colon. Note this is relative (1:2 is the same as 2:4) and a value "
"must be specified for each image using the format "
"'[Formulatrix_image_resolution]:[beamline_image_resolution]'.")
parser.add_argument('-j', '--job',
metavar="job_id",
help="Specify a job_id - this will be reported in the output to help identify this run.")
parser.add_argument('--to_json',
action='store_true',
help="Output a JSON object.")
parser.add_argument('--version',
action='version',
version=VersionHandler.version_string())
parser.add_argument('--log',
metavar="path",
help="Write log files to the directory specified by path.")
self.parser = parser
def get_args(self):
return self.parser.parse_args()
def get_config_dir(self):
config_directory = self.get_args().config
if config_directory is None:
config_directory = abspath(join(self.get_script_path(), readable_config_dir.CONFIG_DIR_NAME))
return abspath(config_directory)
def get_scale_override(self):
scale = self.get_args().scale
log = logging.getLogger(".".join([__name__]))
log.addFilter(logconfig.ThreadContextFilter())
if scale is not None:
try:
scales = scale.split(":")
assert (len(scales) == 2)
return float(scales[0]), float(scales[1])
except AssertionError:
log.error(AssertionError("Scale flag requires two values separated by a colon':'. Value given: " +
str(scale)))
raise AssertionError("Scale flag requires two values separated by a colon':'. Value given: " +
str(scale))
except ValueError:
log.error("Scale must be given as a pair of float values separated by a colon (':'). Value given: " +
str(scale))
raise ValueError(
"Scale must be given as a pair of float values separated by a colon (':'). Value given: " +
str(scale))
return None
def parse_selected_points_from_args(self):
"""Parse the selected points list provided by the command line for validity and returns a list of Point objects.
:param args: Command line arguments provided by argument parser - must contain 'selected_points'
:return: List of Selected Points.
"""
log = logging.getLogger(".".join([__name__]))
log.addFilter(logconfig.ThreadContextFilter())
selected_points = []
if self.get_args().selected_points:
point_expected_format = re.compile("[0-9]+,[0-9]+")
sel_points = self.get_args().selected_points
for point_string in sel_points:
point_string = point_string.strip('()')
match_results = point_expected_format.match(point_string)
# Check the regex matches the entire string
# DEV NOTE: can use re.full_match in Python v3
if match_results is not None and match_results.span()[1] == len(point_string):
x, y = map(int, point_string.strip('()').split(','))
selected_points.append(Point(x, y))
else:
log.warning("Selected point with invalid format will be ignored - '" + point_string + "'")
return selected_points
def get_focused_image(self):
focusing_path = abspath(self.get_args().beamline_stack_path)
if "." not in focusing_path:
files = self._sort_files_according_to_names(focusing_path)
# Run focusstack
stacker = FocusStack(files, self.get_args().config)
focused_image = stacker.composite()
self.images_to_stack = stacker.get_fft_images_to_stack()
else:
focused_image = Image(cv2.imread(focusing_path))
return focused_image
def get_fft_images_to_stack(self):
return self.images_to_stack
def get_formulatrix_image_path(self):
path = self.get_args().Formulatrix_image.name
self._check_is_file(path)
return path
def get_to_json(self):
return self.get_args().to_json
def get_job_id(self):
return self.get_args().job
# Exits with an error if the focused image has not been saved.
# This may change if saving is deferred to a later stage.
def get_focused_image_path(self):
focusing_path = abspath(self.get_args().beamline_stack_path)
if "." not in focusing_path:
focusing_path = self.get_out_file_path()
self._check_is_file(focusing_path)
return abspath(focusing_path)
def save_focused_image(self, image):
image.save(self.get_out_file_path())
def get_out_file_path(self):
"""
Get the path to the output file based on the contents of the config file and the location of the configuration dir.
:return: A string representing the file path of the log file.
"""
dir_path = self._get_output_dir()
self._check_make_dirs(dir_path)
return join(dir_path, self.FOCUSED_IMAGE_NAME)
def get_log_file_path(self):
"""
Get the path to the log file based on the contents of the config file and the location of the configuration dir.
:return: A string representing the file path of the log file.
"""
dir_path = self._get_log_file_dir()
self._check_make_dirs(dir_path)
return join(dir_path, self.LOG_FILE_NAME)
def _get_output_dir(self):
out = self.get_args().output
if out is None:
# default - log file directory
default_output_path = self._get_log_file_dir()
return default_output_path
return abspath(self.get_args().output)
def _get_log_file_dir(self):
l = self.get_args().log
if l is None:
# DEV NOTE: join and abspath used over split due to uncertainty over config path ending in a slash
default_log_path =abspath(join(self.get_script_path(), self.LOG_DIR_NAME))
return default_log_path
return abspath(self.get_args().log)
def _check_make_dirs(self, directory):
if not exists(directory) or not isdir(directory):
log = logging.getLogger(".".join([__name__]))
log.addFilter(logconfig.ThreadContextFilter())
try:
makedirs(directory)
chmod(directory, self.LOG_DIR_PERMISSION)
log.info("Directory created: " + directory)
except OSError:
log.error("Could not create find/create directory, path may be invalid: " + directory)
exit(1)
@staticmethod
def _check_is_file(path):
if not isfile(path):
log = logging.getLogger(".".join([__name__]))
log.addFilter(logconfig.ThreadContextFilter())
log.error("Could not find the file, file may not been saved: " + path)
exit(1)
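A short usage sketch for context - how the ParserManager methods defined above fit together in a hypothetical entry point. The main() scaffolding is an assumption; only the ParserManager calls are taken from this file.

```python
# Hypothetical entry point driving ParserManager (sketch only).
def main():
    parser_manager = ParserManager()
    parser_manager.build_parser()

    # All of these read from the parsed command-line arguments.
    config_dir = parser_manager.get_config_dir()
    scale = parser_manager.get_scale_override()
    selected_points = parser_manager.parse_selected_points_from_args()

    # Focus-stack the beamline images (or load an already-stacked image) and save the result.
    focused_image = parser_manager.get_focused_image()
    parser_manager.save_focused_image(focused_image)

    print(config_dir, scale, selected_points, parser_manager.get_focused_image_path())


if __name__ == '__main__':
    main()
```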
certificate_fetcher.go

(sub dir name) -> cert
updateCheckInterval int
forceUpdateInterval float64
mu *sync.RWMutex
CertName string
KeyName string
initPollDone bool
initPollMu *sync.RWMutex
}
func (fetcher *RCertificateFetcher) checkIfInitPollDone() bool {
	isDone := false
	fetcher.initPollMu.RLock()
	isDone = fetcher.initPollDone
	fetcher.initPollMu.RUnlock()
	return isDone
}
func (fetcher *RCertificateFetcher) setInitPollDone() {
fetcher.initPollMu.Lock()
fetcher.initPollDone = true
fetcher.initPollMu.Unlock()
}
func (fetcher *RCertificateFetcher) FetchCertificates(lbMeta *LBMetadata, isDefaultCert bool) ([]*config.Certificate, error) {
// fetch certificates either from mounted certDir or from cattle
certs := []*config.Certificate{}
var defaultCert *config.Certificate
if fetcher.CertDir != "" || fetcher.DefaultCertDir != "" {
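// Certificates are mounted from disk: block here until LookForCertUpdates has finished its first poll, so the cache is populated before it is read.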
for {
if fetcher.checkIfInitPollDone() {
if isDefaultCert {
if fetcher.DefaultCertDir != "" {
logrus.Debugf("Found defaultCertDir label %v", fetcher.DefaultCertDir)
defaultCert = fetcher.ReadDefaultCertificate(fetcher.DefaultCertDir)
if defaultCert != nil {
certs = append(certs, defaultCert)
}
}
} else {
//read all the certificates from the mounted certDir
if fetcher.CertDir != "" {
logrus.Debugf("Found certDir label %v", fetcher.CertDir)
certsFromDir := fetcher.ReadAllCertificatesFromDir(fetcher.CertDir)
certs = append(certs, certsFromDir...)
}
}
break
} else {
logrus.Debugf("Waiting for InitPollDone()")
time.Sleep(time.Duration(2) * time.Second)
}
}
} else {
if !isDefaultCert {
for _, certID := range lbMeta.CertificateIDs {
cert, err := fetcher.FetchRancherCertificate(certID)
if err != nil {
return nil, err
}
certs = append(certs, cert)
}
} else {
if lbMeta.DefaultCertificateID != "" {
var err error
defaultCert, err = fetcher.FetchRancherCertificate(lbMeta.DefaultCertificateID)
if err != nil {
return nil, err
}
if defaultCert != nil {
certs = append(certs, defaultCert)
}
}
}
}
return certs, nil
}
func (fetcher *RCertificateFetcher) FetchRancherCertificate(certID string) (*config.Certificate, error) {
if certID == "" {
return nil, nil
}
opts := client.NewListOpts()
opts.Filters["id"] = certID
opts.Filters["removed_null"] = "1"
cert, err := fetcher.Client.Certificate.ById(certID)
if err != nil {
return nil, fmt.Errorf("Coudln't get certificate by id [%s]. Error: %#v", certID, err)
}
if cert == nil {
return nil, fmt.Errorf("Failed to fetch certificate by id [%s]", certID)
}
certWithChain := fmt.Sprintf("%s\n%s", cert.Cert, cert.CertChain)
return &config.Certificate{
Name: cert.Name,
Key: cert.Key,
Cert: certWithChain,
}, nil
}
func (fetcher *RCertificateFetcher) UpdateEndpoints(lbSvc *metadata.Service, eps []client.PublicEndpoint) error {
opts := client.NewListOpts()
opts.Filters["uuid"] = lbSvc.UUID
opts.Filters["removed_null"] = "1"
lbs, err := fetcher.Client.LoadBalancerService.List(opts)
if err != nil {
return fmt.Errorf("Coudln't get LB service by uuid [%s]. Error: %#v", lbSvc.UUID, err)
}
if len(lbs.Data) == 0 {
logrus.Infof("Failed to find lb by uuid %s", lbSvc.UUID)
return nil
}
lb := lbs.Data[0]
toUpdate := make(map[string]interface{})
toUpdate["publicEndpoints"] = eps
logrus.Infof("Updating Rancher LB [%s] in stack [%s] with the new public endpoints [%v] ", lbSvc.Name, lbSvc.StackName, eps)
if _, err := fetcher.Client.LoadBalancerService.Update(&lb, toUpdate); err != nil {
return fmt.Errorf("Failed to update Rancher LB [%s] in stack [%s]. Error: %#v", lbSvc.Name, lbSvc.StackName, err)
}
return nil
}
func (fetcher *RCertificateFetcher) ReadAllCertificatesFromDir(certDir string) []*config.Certificate {
certs := []*config.Certificate{}
fetcher.mu.RLock()
for _, value := range fetcher.CertsCache {
certs = append(certs, value)
}
fetcher.mu.RUnlock()
return certs
}
func (fetcher *RCertificateFetcher) ReadDefaultCertificate(defaultCertDir string) *config.Certificate {
var currentDefCert *config.Certificate
fetcher.mu.RLock()
currentDefCert = fetcher.DefaultCert
fetcher.mu.RUnlock()
return currentDefCert
}
func (fetcher *RCertificateFetcher) LookForCertUpdates(doOnUpdate func(string)) {
if fetcher.CertDir != "" || fetcher.DefaultCertDir != "" {
lastUpdated := time.Now()
for {
logrus.Debugf("Start --- LookForCertUpdates polling cert dir %v and default cert dir %v", fetcher.CertDir, fetcher.DefaultCertDir)
forceUpdate := false
certsUpdatedFlag := false
logrus.Debugf("lastUpdated %v", lastUpdated)
if time.Since(lastUpdated).Seconds() >= fetcher.forceUpdateInterval {
logrus.Infof("LookForCertUpdates: Executing force update as certs in cache have not been updated in: %v seconds", fetcher.forceUpdateInterval)
forceUpdate = true
}
//read the certs from the dir into tempMap
if fetcher.CertDir != "" {
fetcher.tempCertsMap = make(map[string]*config.Certificate)
err := filepath.Walk(fetcher.CertDir, fetcher.readCertificate)
if err != nil {
logrus.Errorf("LookForCertUpdates: Error %v reading certs from cert dir %v", err, fetcher.CertDir)
} else {
//compare with existing cache
if forceUpdate || !reflect.DeepEqual(fetcher.CertsCache, fetcher.tempCertsMap) {
if !forceUpdate {
logrus.Infof("LookForCertUpdates: Found an update in cert dir %v, updating the cache", fetcher.CertDir)
} else {
logrus.Infof("LookForCertUpdates: Force Update triggered, updating the cache from cert dir %v", fetcher.CertDir)
}
//there is some change, refresh certs
fetcher.mu.Lock()
fetcher.CertsCache = make(map[string]*config.Certificate)
for path, newCert := range fetcher.tempCertsMap {
fetcher.CertsCache[path] = newCert
logrus.Debugf("LookForCertUpdates: Cert is reloaded in cache : %v", newCert.Name)
}
certsUpdatedFlag = true
fetcher.mu.Unlock()
}
}
}
//read the cert from the defaultCertDir into tempMap
if fetcher.DefaultCertDir != "" {
fetcher.tempCertsMap = make(map[string]*config.Certificate)
err := filepath.Walk(fetcher.DefaultCertDir, fetcher.readCertificate)
if err != nil {
logrus.Errorf("LookForCertUpdates: Error %v reading default cert from dir %v", err, fetcher.DefaultCertDir)
} else {
var tempDefCert *config.Certificate
for _, cert := range fetcher.tempCertsMap {
tempDefCert = cert
}
//compare with existing default cert
if forceUpdate || !reflect.DeepEqual(fetcher.DefaultCert, tempDefCert) {
fetcher.mu.Lock()
fetcher.DefaultCert = tempDefCert
certsUpdatedFlag = true
fetcher.mu.Unlock()
}
}
}
if certsUpdatedFlag {
//scheduleApplyConfig
doOnUpdate("")
lastUpdated = time.Now()
}
if !fetcher.checkIfInitPollDone() {
fetcher.setInitPollDone()
}
logrus.Debug("Done --- LookForCertUpdates poll")
time.Sleep(time.Duration(fetcher.updateCheckInterval) * time.Second)
}
}
}
func (fetcher *RCertificateFetcher) readCertificate(path string, f os.FileInfo, err error) error {
if f != nil && f.IsDir() {
if err != nil {
return fmt.Errorf("Error while walking dir [%v]. Error: %v", path, err)
}
logrus.Debugf("Walking dir %v", path)
isCertFound := false
isKeyFound := false
cert := config.Certificate{}
cert.Name = f.Name()
files, err :=
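For context, a rough sketch of how an RCertificateFetcher could be constructed and started. The field values (intervals, file names, mount paths) are assumptions for illustration; the code is presumed to live in the same package as the fetcher so that RCertificateFetcher and config.Certificate resolve.

```go
// Hypothetical wiring for RCertificateFetcher (sketch only, same package assumed).
func startCertificateFetcher(certDir, defaultCertDir string, onUpdate func(string)) *RCertificateFetcher {
	fetcher := &RCertificateFetcher{
		CertDir:             certDir,
		DefaultCertDir:      defaultCertDir,
		CertsCache:          make(map[string]*config.Certificate),
		tempCertsMap:        make(map[string]*config.Certificate),
		updateCheckInterval: 5,   // assumed: poll the cert directories every 5 seconds
		forceUpdateInterval: 300, // assumed: force a cache refresh every 300 seconds
		mu:                  &sync.RWMutex{},
		initPollMu:          &sync.RWMutex{},
		CertName:            "fullchain.pem", // assumed certificate file name inside each sub-directory
		KeyName:             "privkey.pem",   // assumed private key file name
	}
	// Note: fetcher.Client (the Rancher API client) would also need to be set when
	// certificates are fetched from Cattle rather than from a mounted directory.

	// Poll for certificate changes in the background; onUpdate is expected to
	// schedule a load-balancer config re-apply.
	go fetcher.LookForCertUpdates(onUpdate)
	return fetcher
}
```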
certificate_fetcher.go | name (sub dir name) -> cert
updateCheckInterval int
forceUpdateInterval float64
mu *sync.RWMutex
CertName string
KeyName string
initPollDone bool
initPollMu *sync.RWMutex
}
func (fetcher *RCertificateFetcher) checkIfInitPollDone() bool {
isDone := false
fetcher.initPollMu.RLock()
isDone = fetcher.initPollDone
fetcher.initPollMu.RUnlock()
return isDone
}
func (fetcher *RCertificateFetcher) setInitPollDone() {
fetcher.initPollMu.Lock()
fetcher.initPollDone = true
fetcher.initPollMu.Unlock()
}
func (fetcher *RCertificateFetcher) FetchCertificates(lbMeta *LBMetadata, isDefaultCert bool) ([]*config.Certificate, error) {
// fetch certificates either from mounted certDir or from cattle
certs := []*config.Certificate{}
var defaultCert *config.Certificate
if fetcher.CertDir != "" || fetcher.DefaultCertDir != "" {
for {
if fetcher.checkIfInitPollDone() {
if isDefaultCert {
if fetcher.DefaultCertDir != "" {
logrus.Debugf("Found defaultCertDir label %v", fetcher.DefaultCertDir)
defaultCert = fetcher.ReadDefaultCertificate(fetcher.DefaultCertDir)
if defaultCert != nil {
certs = append(certs, defaultCert)
}
}
} else {
//read all the certificates from the mounted certDir
if fetcher.CertDir != "" {
logrus.Debugf("Found certDir label %v", fetcher.CertDir)
certsFromDir := fetcher.ReadAllCertificatesFromDir(fetcher.CertDir)
certs = append(certs, certsFromDir...)
}
}
break
} else {
logrus.Debugf("Waiting for InitPollDone()")
time.Sleep(time.Duration(2) * time.Second)
}
}
} else {
if !isDefaultCert {
for _, certID := range lbMeta.CertificateIDs {
cert, err := fetcher.FetchRancherCertificate(certID)
if err != nil {
return nil, err
}
certs = append(certs, cert)
}
} else {
if lbMeta.DefaultCertificateID != "" {
var err error
defaultCert, err = fetcher.FetchRancherCertificate(lbMeta.DefaultCertificateID)
if err != nil {
return nil, err
}
if defaultCert != nil {
certs = append(certs, defaultCert)
}
}
}
}
return certs, nil
}
func (fetcher *RCertificateFetcher) FetchRancherCertificate(certID string) (*config.Certificate, error) {
if certID == "" {
return nil, nil
}
opts := client.NewListOpts()
opts.Filters["id"] = certID
opts.Filters["removed_null"] = "1"
cert, err := fetcher.Client.Certificate.ById(certID)
if err != nil {
return nil, fmt.Errorf("Coudln't get certificate by id [%s]. Error: %#v", certID, err)
}
if cert == nil {
return nil, fmt.Errorf("Failed to fetch certificate by id [%s]", certID)
}
certWithChain := fmt.Sprintf("%s\n%s", cert.Cert, cert.CertChain)
return &config.Certificate{
Name: cert.Name,
Key: cert.Key,
Cert: certWithChain,
}, nil
}
func (fetcher *RCertificateFetcher) UpdateEndpoints(lbSvc *metadata.Service, eps []client.PublicEndpoint) error {
opts := client.NewListOpts()
opts.Filters["uuid"] = lbSvc.UUID
opts.Filters["removed_null"] = "1"
lbs, err := fetcher.Client.LoadBalancerService.List(opts)
if err != nil {
return fmt.Errorf("Coudln't get LB service by uuid [%s]. Error: %#v", lbSvc.UUID, err)
}
if len(lbs.Data) == 0 {
logrus.Infof("Failed to find lb by uuid %s", lbSvc.UUID)
return nil
}
lb := lbs.Data[0]
toUpdate := make(map[string]interface{})
toUpdate["publicEndpoints"] = eps
logrus.Infof("Updating Rancher LB [%s] in stack [%s] with the new public endpoints [%v] ", lbSvc.Name, lbSvc.StackName, eps)
if _, err := fetcher.Client.LoadBalancerService.Update(&lb, toUpdate); err != nil {
return fmt.Errorf("Failed to update Rancher LB [%s] in stack [%s]. Error: %#v", lbSvc.Name, lbSvc.StackName, err)
}
return nil
}
func (fetcher *RCertificateFetcher) ReadAllCertificatesFromDir(certDir string) []*config.Certificate {
certs := []*config.Certificate{}
fetcher.mu.RLock()
for _, value := range fetcher.CertsCache {
certs = append(certs, value)
}
fetcher.mu.RUnlock()
return certs
}
func (fetcher *RCertificateFetcher) ReadDefaultCertificate(defaultCertDir string) *config.Certificate {
var currentDefCert *config.Certificate
fetcher.mu.RLock()
currentDefCert = fetcher.DefaultCert
fetcher.mu.RUnlock()
return currentDefCert
}
func (fetcher *RCertificateFetcher) LookForCertUpdates(doOnUpdate func(string)) {
if fetcher.CertDir != "" || fetcher.DefaultCertDir != "" {
lastUpdated := time.Now()
for {
logrus.Debugf("Start --- LookForCertUpdates polling cert dir %v and default cert dir %v", fetcher.CertDir, fetcher.DefaultCertDir)
forceUpdate := false
certsUpdatedFlag := false
logrus.Debugf("lastUpdated %v", lastUpdated)
if time.Since(lastUpdated).Seconds() >= fetcher.forceUpdateInterval {
logrus.Infof("LookForCertUpdates: Executing force update as certs in cache have not been updated in: %v seconds", fetcher.forceUpdateInterval)
forceUpdate = true
}
//read the certs from the dir into tempMap
if fetcher.CertDir != "" {
fetcher.tempCertsMap = make(map[string]*config.Certificate)
err := filepath.Walk(fetcher.CertDir, fetcher.readCertificate)
if err != nil {
logrus.Errorf("LookForCertUpdates: Error %v reading certs from cert dir %v", err, fetcher.CertDir) | } else {
logrus.Infof("LookForCertUpdates: Force Update triggered, updating the cache from cert dir %v", fetcher.CertDir)
}
//there is some change, refresh certs
fetcher.mu.Lock()
fetcher.CertsCache = make(map[string]*config.Certificate)
for path, newCert := range fetcher.tempCertsMap {
fetcher.CertsCache[path] = newCert
logrus.Debugf("LookForCertUpdates: Cert is reloaded in cache : %v", newCert.Name)
}
certsUpdatedFlag = true
fetcher.mu.Unlock()
}
}
}
//read the cert from the defaultCertDir into tempMap
if fetcher.DefaultCertDir != "" {
fetcher.tempCertsMap = make(map[string]*config.Certificate)
err := filepath.Walk(fetcher.DefaultCertDir, fetcher.readCertificate)
if err != nil {
logrus.Errorf("LookForCertUpdates: Error %v reading default cert from dir %v", err, fetcher.DefaultCertDir)
} else {
var tempDefCert *config.Certificate
for _, cert := range fetcher.tempCertsMap {
tempDefCert = cert
}
//compare with existing default cert
if forceUpdate || !reflect.DeepEqual(fetcher.DefaultCert, tempDefCert) {
fetcher.mu.Lock()
fetcher.DefaultCert = tempDefCert
certsUpdatedFlag = true
fetcher.mu.Unlock()
}
}
}
if certsUpdatedFlag {
//scheduleApplyConfig
doOnUpdate("")
lastUpdated = time.Now()
}
if !fetcher.checkIfInitPollDone() {
fetcher.setInitPollDone()
}
logrus.Debug("Done --- LookForCertUpdates poll")
time.Sleep(time.Duration(fetcher.updateCheckInterval) * time.Second)
}
}
}
func (fetcher *RCertificateFetcher) readCertificate(path string, f os.FileInfo, err error) error {
if f != nil && f.IsDir() {
if err != nil {
return fmt.Errorf("Error while walking dir [%v]. Error: %v", path, err)
}
logrus.Debugf("Walking dir %v", path)
isCertFound := false
isKeyFound := false
cert := config.Certificate{}
cert.Name = f.Name()
files, err := ioutil | } else {
//compare with existing cache
if forceUpdate || !reflect.DeepEqual(fetcher.CertsCache, fetcher.tempCertsMap) {
if !forceUpdate {
logrus.Infof("LookForCertUpdates: Found an update in cert dir %v, updating the cache", fetcher.CertDir) | random_line_split |
certificate_fetcher.go | (sub dir name) -> cert
updateCheckInterval int
forceUpdateInterval float64
mu *sync.RWMutex
CertName string
KeyName string
initPollDone bool
initPollMu *sync.RWMutex
}
func (fetcher *RCertificateFetcher) checkIfInitPollDone() bool {
isDone := false
fetcher.initPollMu.RLock()
isDone = fetcher.initPollDone
fetcher.initPollMu.RUnlock()
return isDone
}
func (fetcher *RCertificateFetcher) setInitPollDone() {
fetcher.initPollMu.Lock()
fetcher.initPollDone = true
fetcher.initPollMu.Unlock()
}
func (fetcher *RCertificateFetcher) FetchCertificates(lbMeta *LBMetadata, isDefaultCert bool) ([]*config.Certificate, error) {
// fetch certificates either from mounted certDir or from cattle
certs := []*config.Certificate{}
var defaultCert *config.Certificate
if fetcher.CertDir != "" || fetcher.DefaultCertDir != "" {
for {
if fetcher.checkIfInitPollDone() {
if isDefaultCert {
if fetcher.DefaultCertDir != "" {
logrus.Debugf("Found defaultCertDir label %v", fetcher.DefaultCertDir)
defaultCert = fetcher.ReadDefaultCertificate(fetcher.DefaultCertDir)
if defaultCert != nil {
certs = append(certs, defaultCert)
}
}
} else {
//read all the certificates from the mounted certDir
if fetcher.CertDir != "" {
logrus.Debugf("Found certDir label %v", fetcher.CertDir)
certsFromDir := fetcher.ReadAllCertificatesFromDir(fetcher.CertDir)
certs = append(certs, certsFromDir...)
}
}
break
} else {
logrus.Debugf("Waiting for InitPollDone()")
time.Sleep(time.Duration(2) * time.Second)
}
}
} else {
if !isDefaultCert {
for _, certID := range lbMeta.CertificateIDs {
cert, err := fetcher.FetchRancherCertificate(certID)
if err != nil {
return nil, err
}
certs = append(certs, cert)
}
} else {
if lbMeta.DefaultCertificateID != "" {
var err error
defaultCert, err = fetcher.FetchRancherCertificate(lbMeta.DefaultCertificateID)
if err != nil {
return nil, err
}
if defaultCert != nil {
certs = append(certs, defaultCert)
}
}
}
}
return certs, nil
}
func (fetcher *RCertificateFetcher) | (certID string) (*config.Certificate, error) {
if certID == "" {
return nil, nil
}
opts := client.NewListOpts()
opts.Filters["id"] = certID
opts.Filters["removed_null"] = "1"
cert, err := fetcher.Client.Certificate.ById(certID)
if err != nil {
return nil, fmt.Errorf("Coudln't get certificate by id [%s]. Error: %#v", certID, err)
}
if cert == nil {
return nil, fmt.Errorf("Failed to fetch certificate by id [%s]", certID)
}
certWithChain := fmt.Sprintf("%s\n%s", cert.Cert, cert.CertChain)
return &config.Certificate{
Name: cert.Name,
Key: cert.Key,
Cert: certWithChain,
}, nil
}
func (fetcher *RCertificateFetcher) UpdateEndpoints(lbSvc *metadata.Service, eps []client.PublicEndpoint) error {
opts := client.NewListOpts()
opts.Filters["uuid"] = lbSvc.UUID
opts.Filters["removed_null"] = "1"
lbs, err := fetcher.Client.LoadBalancerService.List(opts)
if err != nil {
return fmt.Errorf("Coudln't get LB service by uuid [%s]. Error: %#v", lbSvc.UUID, err)
}
if len(lbs.Data) == 0 {
logrus.Infof("Failed to find lb by uuid %s", lbSvc.UUID)
return nil
}
lb := lbs.Data[0]
toUpdate := make(map[string]interface{})
toUpdate["publicEndpoints"] = eps
logrus.Infof("Updating Rancher LB [%s] in stack [%s] with the new public endpoints [%v] ", lbSvc.Name, lbSvc.StackName, eps)
if _, err := fetcher.Client.LoadBalancerService.Update(&lb, toUpdate); err != nil {
return fmt.Errorf("Failed to update Rancher LB [%s] in stack [%s]. Error: %#v", lbSvc.Name, lbSvc.StackName, err)
}
return nil
}
func (fetcher *RCertificateFetcher) ReadAllCertificatesFromDir(certDir string) []*config.Certificate {
certs := []*config.Certificate{}
fetcher.mu.RLock()
for _, value := range fetcher.CertsCache {
certs = append(certs, value)
}
fetcher.mu.RUnlock()
return certs
}
func (fetcher *RCertificateFetcher) ReadDefaultCertificate(defaultCertDir string) *config.Certificate {
var currentDefCert *config.Certificate
fetcher.mu.RLock()
currentDefCert = fetcher.DefaultCert
fetcher.mu.RUnlock()
return currentDefCert
}
func (fetcher *RCertificateFetcher) LookForCertUpdates(doOnUpdate func(string)) {
if fetcher.CertDir != "" || fetcher.DefaultCertDir != "" {
lastUpdated := time.Now()
for {
logrus.Debugf("Start --- LookForCertUpdates polling cert dir %v and default cert dir %v", fetcher.CertDir, fetcher.DefaultCertDir)
forceUpdate := false
certsUpdatedFlag := false
logrus.Debugf("lastUpdated %v", lastUpdated)
if time.Since(lastUpdated).Seconds() >= fetcher.forceUpdateInterval {
logrus.Infof("LookForCertUpdates: Executing force update as certs in cache have not been updated in: %v seconds", fetcher.forceUpdateInterval)
forceUpdate = true
}
//read the certs from the dir into tempMap
if fetcher.CertDir != "" {
fetcher.tempCertsMap = make(map[string]*config.Certificate)
err := filepath.Walk(fetcher.CertDir, fetcher.readCertificate)
if err != nil {
logrus.Errorf("LookForCertUpdates: Error %v reading certs from cert dir %v", err, fetcher.CertDir)
} else {
//compare with existing cache
if forceUpdate || !reflect.DeepEqual(fetcher.CertsCache, fetcher.tempCertsMap) {
if !forceUpdate {
logrus.Infof("LookForCertUpdates: Found an update in cert dir %v, updating the cache", fetcher.CertDir)
} else {
logrus.Infof("LookForCertUpdates: Force Update triggered, updating the cache from cert dir %v", fetcher.CertDir)
}
//there is some change, refresh certs
fetcher.mu.Lock()
fetcher.CertsCache = make(map[string]*config.Certificate)
for path, newCert := range fetcher.tempCertsMap {
fetcher.CertsCache[path] = newCert
logrus.Debugf("LookForCertUpdates: Cert is reloaded in cache : %v", newCert.Name)
}
certsUpdatedFlag = true
fetcher.mu.Unlock()
}
}
}
//read the cert from the defaultCertDir into tempMap
if fetcher.DefaultCertDir != "" {
fetcher.tempCertsMap = make(map[string]*config.Certificate)
err := filepath.Walk(fetcher.DefaultCertDir, fetcher.readCertificate)
if err != nil {
logrus.Errorf("LookForCertUpdates: Error %v reading default cert from dir %v", err, fetcher.DefaultCertDir)
} else {
var tempDefCert *config.Certificate
for _, cert := range fetcher.tempCertsMap {
tempDefCert = cert
}
//compare with existing default cert
if forceUpdate || !reflect.DeepEqual(fetcher.DefaultCert, tempDefCert) {
fetcher.mu.Lock()
fetcher.DefaultCert = tempDefCert
certsUpdatedFlag = true
fetcher.mu.Unlock()
}
}
}
if certsUpdatedFlag {
//scheduleApplyConfig
doOnUpdate("")
lastUpdated = time.Now()
}
if !fetcher.checkIfInitPollDone() {
fetcher.setInitPollDone()
}
logrus.Debug("Done --- LookForCertUpdates poll")
time.Sleep(time.Duration(fetcher.updateCheckInterval) * time.Second)
}
}
}
func (fetcher *RCertificateFetcher) readCertificate(path string, f os.FileInfo, err error) error {
if f != nil && f.IsDir() {
if err != nil {
return fmt.Errorf("Error while walking dir [%v]. Error: %v", path, err)
}
logrus.Debugf("Walking dir %v", path)
isCertFound := false
isKeyFound := false
cert := config.Certificate{}
cert.Name = f.Name()
files, err := ioutil | FetchRancherCertificate | identifier_name |
certificate_fetcher.go | for _, certID := range lbMeta.CertificateIDs {
cert, err := fetcher.FetchRancherCertificate(certID)
if err != nil {
return nil, err
}
certs = append(certs, cert)
}
} else {
if lbMeta.DefaultCertificateID != "" {
var err error
defaultCert, err = fetcher.FetchRancherCertificate(lbMeta.DefaultCertificateID)
if err != nil {
return nil, err
}
if defaultCert != nil {
certs = append(certs, defaultCert)
}
}
}
}
return certs, nil
}
func (fetcher *RCertificateFetcher) FetchRancherCertificate(certID string) (*config.Certificate, error) {
if certID == "" {
return nil, nil
}
opts := client.NewListOpts()
opts.Filters["id"] = certID
opts.Filters["removed_null"] = "1"
cert, err := fetcher.Client.Certificate.ById(certID)
if err != nil {
return nil, fmt.Errorf("Coudln't get certificate by id [%s]. Error: %#v", certID, err)
}
if cert == nil {
return nil, fmt.Errorf("Failed to fetch certificate by id [%s]", certID)
}
certWithChain := fmt.Sprintf("%s\n%s", cert.Cert, cert.CertChain)
return &config.Certificate{
Name: cert.Name,
Key: cert.Key,
Cert: certWithChain,
}, nil
}
func (fetcher *RCertificateFetcher) UpdateEndpoints(lbSvc *metadata.Service, eps []client.PublicEndpoint) error {
opts := client.NewListOpts()
opts.Filters["uuid"] = lbSvc.UUID
opts.Filters["removed_null"] = "1"
lbs, err := fetcher.Client.LoadBalancerService.List(opts)
if err != nil {
return fmt.Errorf("Coudln't get LB service by uuid [%s]. Error: %#v", lbSvc.UUID, err)
}
if len(lbs.Data) == 0 {
logrus.Infof("Failed to find lb by uuid %s", lbSvc.UUID)
return nil
}
lb := lbs.Data[0]
toUpdate := make(map[string]interface{})
toUpdate["publicEndpoints"] = eps
logrus.Infof("Updating Rancher LB [%s] in stack [%s] with the new public endpoints [%v] ", lbSvc.Name, lbSvc.StackName, eps)
if _, err := fetcher.Client.LoadBalancerService.Update(&lb, toUpdate); err != nil {
return fmt.Errorf("Failed to update Rancher LB [%s] in stack [%s]. Error: %#v", lbSvc.Name, lbSvc.StackName, err)
}
return nil
}
func (fetcher *RCertificateFetcher) ReadAllCertificatesFromDir(certDir string) []*config.Certificate {
certs := []*config.Certificate{}
fetcher.mu.RLock()
for _, value := range fetcher.CertsCache {
certs = append(certs, value)
}
fetcher.mu.RUnlock()
return certs
}
func (fetcher *RCertificateFetcher) ReadDefaultCertificate(defaultCertDir string) *config.Certificate {
var currentDefCert *config.Certificate
fetcher.mu.RLock()
currentDefCert = fetcher.DefaultCert
fetcher.mu.RUnlock()
return currentDefCert
}
func (fetcher *RCertificateFetcher) LookForCertUpdates(doOnUpdate func(string)) {
if fetcher.CertDir != "" || fetcher.DefaultCertDir != "" {
lastUpdated := time.Now()
for {
logrus.Debugf("Start --- LookForCertUpdates polling cert dir %v and default cert dir %v", fetcher.CertDir, fetcher.DefaultCertDir)
forceUpdate := false
certsUpdatedFlag := false
logrus.Debugf("lastUpdated %v", lastUpdated)
if time.Since(lastUpdated).Seconds() >= fetcher.forceUpdateInterval {
logrus.Infof("LookForCertUpdates: Executing force update as certs in cache have not been updated in: %v seconds", fetcher.forceUpdateInterval)
forceUpdate = true
}
//read the certs from the dir into tempMap
if fetcher.CertDir != "" {
fetcher.tempCertsMap = make(map[string]*config.Certificate)
err := filepath.Walk(fetcher.CertDir, fetcher.readCertificate)
if err != nil {
logrus.Errorf("LookForCertUpdates: Error %v reading certs from cert dir %v", err, fetcher.CertDir)
} else {
//compare with existing cache
if forceUpdate || !reflect.DeepEqual(fetcher.CertsCache, fetcher.tempCertsMap) {
if !forceUpdate {
logrus.Infof("LookForCertUpdates: Found an update in cert dir %v, updating the cache", fetcher.CertDir)
} else {
logrus.Infof("LookForCertUpdates: Force Update triggered, updating the cache from cert dir %v", fetcher.CertDir)
}
//there is some change, refresh certs
fetcher.mu.Lock()
fetcher.CertsCache = make(map[string]*config.Certificate)
for path, newCert := range fetcher.tempCertsMap {
fetcher.CertsCache[path] = newCert
logrus.Debugf("LookForCertUpdates: Cert is reloaded in cache : %v", newCert.Name)
}
certsUpdatedFlag = true
fetcher.mu.Unlock()
}
}
}
//read the cert from the defaultCertDir into tempMap
if fetcher.DefaultCertDir != "" {
fetcher.tempCertsMap = make(map[string]*config.Certificate)
err := filepath.Walk(fetcher.DefaultCertDir, fetcher.readCertificate)
if err != nil {
logrus.Errorf("LookForCertUpdates: Error %v reading default cert from dir %v", err, fetcher.DefaultCertDir)
} else {
var tempDefCert *config.Certificate
for _, cert := range fetcher.tempCertsMap {
tempDefCert = cert
}
//compare with existing default cert
if forceUpdate || !reflect.DeepEqual(fetcher.DefaultCert, tempDefCert) {
fetcher.mu.Lock()
fetcher.DefaultCert = tempDefCert
certsUpdatedFlag = true
fetcher.mu.Unlock()
}
}
}
if certsUpdatedFlag {
//scheduleApplyConfig
doOnUpdate("")
lastUpdated = time.Now()
}
if !fetcher.checkIfInitPollDone() {
fetcher.setInitPollDone()
}
logrus.Debug("Done --- LookForCertUpdates poll")
time.Sleep(time.Duration(fetcher.updateCheckInterval) * time.Second)
}
}
}
func (fetcher *RCertificateFetcher) readCertificate(path string, f os.FileInfo, err error) error {
if f != nil && f.IsDir() {
if err != nil {
return fmt.Errorf("Error while walking dir [%v]. Error: %v", path, err)
}
logrus.Debugf("Walking dir %v", path)
isCertFound := false
isKeyFound := false
cert := config.Certificate{}
cert.Name = f.Name()
files, err := ioutil.ReadDir(path)
if err != nil {
return err
}
for _, file := range files {
if !file.IsDir() {
contentBytes, err := fetcher.evaluatueLinkAndReadFile(path, file.Name())
if err != nil {
logrus.Errorf("Error while reading file [%v]. Error: %v", file.Name(), err)
} else {
if file.Name() == fetcher.CertName {
isCertFound = true
cert.Cert = string(*contentBytes)
} else if file.Name() == fetcher.KeyName {
isKeyFound = true
cert.Key = string(*contentBytes)
}
}
}
}
if isCertFound && isKeyFound {
fetcher.tempCertsMap[path] = &cert
} else if isCertFound || isKeyFound {
logrus.Warnf("Skipping incomplete cert found under dir [%v], [isCertFound %v] [isKeyFound %v]", path, isCertFound, isKeyFound)
}
}
return nil
}
func (fetcher *RCertificateFetcher) evaluatueLinkAndReadFile(relativePath string, fileName string) (*[]byte, error) {
filePath := path.Join(relativePath, fileName)
absFilePath, err := filepath.Abs(filePath)
if err != nil {
return nil, fmt.Errorf("Error forming path to file %s, error: %v", filePath, err)
}
fInfo, err := os.Lstat(absFilePath)
if os.IsNotExist(err) {
return nil, fmt.Errorf("File %s does not exist", absFilePath)
}
targetPath := absFilePath
if fInfo.Mode()&os.ModeSymlink != 0 {
//read symlink
targetPath, err = filepath.EvalSymlinks(absFilePath)
if err != nil | {
return nil, fmt.Errorf("File %s pointed by symlink %s does not exist, error: %v", targetPath, absFilePath, err)
} | conditional_block |
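// evaluatueLinkAndReadFile above Lstat()s the path and, when it is a symlink,
// resolves it with filepath.EvalSymlinks before reading the mounted file. A
// compact sketch of the same resolve-then-read flow; readThroughLink is a
// hypothetical helper name used only for this illustration.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func readThroughLink(dir, name string) ([]byte, error) {
	p, err := filepath.Abs(filepath.Join(dir, name))
	if err != nil {
		return nil, err
	}
	info, err := os.Lstat(p)
	if err != nil {
		return nil, err
	}
	target := p
	if info.Mode()&os.ModeSymlink != 0 {
		// plain assignment (not :=) so the outer target is updated, not shadowed
		target, err = filepath.EvalSymlinks(p)
		if err != nil {
			return nil, err
		}
	}
	return os.ReadFile(target)
}

func main() {
	b, err := readThroughLink("/etc", "hostname")
	fmt.Println(len(b), err)
}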
|
ProteinRNN.py |
input_ids = tf.one_hot(input_ids,21)
input_ids = ops.convert_to_tensor(input_ids, dtype=tf.float32)
# Run the model.
predicted_logits = self.model(inputs=input_ids)
# Only use the last prediction.
predicted_logits = predicted_logits / self.temperature
# Apply the prediction mask: prevent "[UNK]" from being generated.
predicted_logits = predicted_logits + self.prediction_mask
# Sample the output logits to generate token IDs.
predicted_ids = tf.random.categorical(predicted_logits, num_samples=1)
# Convert from token ids to characters
predicted_chars = self.chars_from_ids(predicted_ids)
# Return the characters.
return predicted_chars
##### end of class #####
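# generate_one_step above picks the next character by scaling the logits with a
# temperature, adding a mask, then sampling with tf.random.categorical. A tiny
# self-contained sketch of just that sampling step (the logits are made up):
import tensorflow as tf
logits = tf.constant([[2.0, 0.5, -1.0]])                # scores for 3 dummy tokens
scaled = logits / 0.7                                   # temperature < 1 sharpens
next_id = tf.random.categorical(scaled, num_samples=1)  # sampled token id
print(int(next_id[0, 0]))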
# convert made up sequence to a tensor
# line is temp[0], passed in as the argument here
def FlipChars(line):
|
seq.append(ord(charList[i]) - ord('A') + 1)
# grab the labels and convert seq into a one hot encoding of 21
if seq[len(seq)-1] > 20:
seq[len(seq)-1] = 20
# get one hot tensor and test label
label = seq[len(seq)-1]
seq = tf.one_hot(seq,21)
test.append(seq[:len(seq)-1])
testlabel.append(label)
# convert labels to tensors and one hot to a tensor again
test_label = np.asarray(testlabel).astype('float32').reshape((-1,1))
test_label = ops.convert_to_tensor(test_label, dtype=tf.float32)
test = ops.convert_to_tensor(seq, dtype=tf.float32)
return test, test_label
### The function for gathering tests
def read_seqV2(sequence):
f = open(sequence, 'r')
test = []
testlabel = []
# Reading file and extracting paths and labels
with open(sequence, 'r') as File:
infoFile = File.readlines() # Reading all the lines from File
count = 0
for line in infoFile: #Reading line-by-line
if count == 44011:
return test, testlabel
# get testing data
if count % 5 == 0: # every 5th sequence goes into the test set (x_test) with its label in y_test
temp = line.split()
charList = list(temp[0])
seq = []
# if for some reason the protein sequence is shorter than 100 characters
if len(charList) < 100:
for i in range(len(charList)):
seq.append(ord(charList[i]) - ord('A') + 1)
if seq[len(seq)-1] > 20:
seq[len(seq)-1] = 20
label = seq[len(seq)-1]
while len(seq) < 100:
seq.append(0)
# grab the labels and convert seq into a one hot encoding of 21
seq = tf.one_hot(seq,21)
test.append(seq[:len(seq)-1])
testlabel.append(label)
seq = []
else:
for i in range(100): # convert each letter into an int
seq.append(ord(charList[i]) - ord('A') + 1)
# grab the labels and convert seq into a one hot encoding of 21
if seq[len(seq)-1] > 20:
seq[len(seq)-1] = 20
label = seq[len(seq)-1]
seq = tf.one_hot(seq,21)
test.append(seq[:len(seq)-1])
testlabel.append(label)
seq = []
count += 1
#### The function for reading and parsing file
def read_seq(sequence):
f = open(sequence, 'r')
train = []
trainlabel = []
# Reading file and extracting paths and labels
with open(sequence, 'r') as File:
infoFile = File.readlines() # Reading all the lines from File
count = 0
for line in infoFile: #Reading line-by-line
if count == 44011:
return train, trainlabel
# training data addin
if count % 5 != 0: # training data found
temp = line.split()
charList = list(temp[0])
seq = []
# if for some reason the protein sequence is shorter than 100 characters
if len(charList) < 100:
for i in range(len(charList)):
seq.append(ord(charList[i]) - ord('A') + 1)
while len(seq) < 100:
seq.append(0)
# grab the labels and convert seq into a one hot encoding of 21
if seq[len(seq)-1] > 20:
seq[len(seq)-1] = 20
label = seq[len(seq)-1]
seq = tf.one_hot(seq,21)
train.append(seq[:len(seq)-1])
trainlabel.append(label)
seq = []
else:
for i in range(100): # convert each letter into an int
seq.append(ord(charList[i]) - ord('A') + 1)
# grab the labels and convert seq into a one hot encoding of 21
if seq[len(seq)-1] > 20:
seq[len(seq)-1] = 20
label = seq[len(seq)-1]
seq = tf.one_hot(seq,21)
train.append(seq[:len(seq)-1])
trainlabel.append(label)
seq = []
count += 1
# We parse files to get training data
seq_train, train_label = read_seq('/content/gdrive/My Drive/pdb_seqres.txt')
seq_test, test_label = read_seqV2('/content/gdrive/My Drive/pdb_seqres.txt')
# We reshape labels to be 2d arrays
train_label = np.asarray(train_label).astype('float32').reshape((-1,1))
test_label = np.asarray(test_label).astype('float32').reshape((-1,1))
# We convert labels (y_train and y_test) to tensors
train_label = ops.convert_to_tensor(train_label, dtype=tf.float32)
test_label = ops.convert_to_tensor(test_label, dtype=tf.float32)
# We make the training and testing tensors floats instead of ints
seq_train = ops.convert_to_tensor(seq_train, dtype=tf.float32)
seq_test = ops.convert_to_tensor(seq_test, dtype=tf.float32)
# We create our recurrent neural network (RNN)
model = keras.Sequential(
[
layers.LSTM(64, return_sequences=True, activation='relu'), # LSTM layer
layers.BatchNormalization(),
layers.Dropout(0.5),
layers.LSTM(128, return_sequences=True, kernel_initializer='glorot_uniform', activation='tanh', bias_initializer='zeros'), # This is the LSTM layer with weights initialized
layers.BatchNormalization(),
layers.Dropout(0.6),
layers.LSTM(64, return_sequences=False, activation='tanh'), # LSTM layer
layers.BatchNormalization(),
layers.Dropout(0.7),
layers.Dense(21, activation='softmax') # Output layer
]
)
"""
For extra credit portion - testing
lr_schedule = keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate=1e-2,
decay_steps=1000,
decay_rate=0.9)
optimizer = keras.optimizers.SGD(learning_rate=lr_schedule)
Optimizer.apply_gradients(
grads_and_vars, name=None, experimental_aggregate_gradients=True
)
"""
# We compile the model
model.compile(loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
# We get the history of the model to plot stuff
historyMod = model.fit(x=seq_train, y=train_label, epochs=25, batch_size=128, validation_data=(seq_test, test_label))
# save the model
#model.save_weights('/content/gdrive/My Drive/saved_model-35E_BIG+_BS128')
# We print out a summary of our model
model.summary()
# We plot results of the training model
plt.plot(historyMod.history['accuracy'], label='Training data')
plt.plot(historyMod.history['val_accuracy'], label='Validation data')
plt.title('Accuracy training vs. Accuracy validation')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend | testlabel = []
test = []
temp = line.split()
charList = list(temp[0])
seq = []
for i in range(50): # convert each letter into an int, randomly flip a char or more
# these 3 if conditions randomly flip a character to test for dependency; comment out any of them for experimentation
if i == 15:
seq.append(ord(string.ascii_uppercase[random.randint(0,25)]) - ord('A') + 1)
continue
if i == 25:
seq.append(ord(string.ascii_uppercase[random.randint(0,25)]) - ord('A') + 1)
continue
if i >= 45:
seq.append(ord(string.ascii_uppercase[random.randint(0,25)]) - ord('A') + 1)
continue | identifier_body |
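# The encoding used throughout ProteinRNN.py maps each amino-acid letter to
# ord(ch) - ord('A') + 1, clamps anything above 20, pads to a fixed window and
# one-hot encodes into 21 classes (index 0 doubles as padding). A standalone
# sketch of that step; encode_window is an illustrative name, not from the file.
import tensorflow as tf

def encode_window(seq, width=100):
    ids = [min(ord(ch) - ord('A') + 1, 20) for ch in seq[:width]]
    label = ids[-1]                  # last residue is the prediction target
    ids += [0] * (width - len(ids))  # right-pad short sequences with zeros
    one_hot = tf.one_hot(ids, 21)    # shape: (width, 21)
    return one_hot[:-1], label       # 99 input steps plus the label

x, y = encode_window("MVLSEGEWQLVLHVWAKV")
print(x.shape, y)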
ProteinRNN.py |
input_ids = tf.one_hot(input_ids,21)
input_ids = ops.convert_to_tensor(input_ids, dtype=tf.float32)
# Run the model.
predicted_logits = self.model(inputs=input_ids)
# Only use the last prediction.
predicted_logits = predicted_logits / self.temperature
# Apply the prediction mask: prevent "[UNK]" from being generated.
predicted_logits = predicted_logits + self.prediction_mask
# Sample the output logits to generate token IDs.
predicted_ids = tf.random.categorical(predicted_logits, num_samples=1)
# Convert from token ids to characters
predicted_chars = self.chars_from_ids(predicted_ids)
# Return the characters.
return predicted_chars
##### end of class #####
# convert made up sequence to a tensor
# line is temp[0], passed in as the argument here
def | (line):
testlabel = []
test = []
temp = line.split()
charList = list(temp[0])
seq = []
for i in range(50): # convert each letter into an int, randomly flip a char or more
# these 3 if conditions randomly flip a character to test for dependency; comment out any of them for experimentation
if i == 15:
seq.append(ord(string.ascii_uppercase[random.randint(0,25)]) - ord('A') + 1)
continue
if i == 25:
seq.append(ord(string.ascii_uppercase[random.randint(0,25)]) - ord('A') + 1)
continue
if i >= 45:
seq.append(ord(string.ascii_uppercase[random.randint(0,25)]) - ord('A') + 1)
continue
seq.append(ord(charList[i]) - ord('A') + 1)
# grab the labels and convert seq into a one hot encoding of 21
if seq[len(seq)-1] > 20:
seq[len(seq)-1] = 20
# get one hot tensor and test label
label = seq[len(seq)-1]
seq = tf.one_hot(seq,21)
test.append(seq[:len(seq)-1])
testlabel.append(label)
# convert labels to tensors and one hot to a tensor again
test_label = np.asarray(testlabel).astype('float32').reshape((-1,1))
test_label = ops.convert_to_tensor(test_label, dtype=tf.float32)
test = ops.convert_to_tensor(seq, dtype=tf.float32)
return test, test_label
### The function for gathering tests
def read_seqV2(sequence):
f = open(sequence, 'r')
test = []
testlabel = []
# Reading file and extracting paths and labels
with open(sequence, 'r') as File:
infoFile = File.readlines() # Reading all the lines from File
count = 0
for line in infoFile: #Reading line-by-line
if count == 44011:
return test, testlabel
# get testing data
if count % 5 == 0: # every 5th sequence goes into the test set (x_test) with its label in y_test
temp = line.split()
charList = list(temp[0])
seq = []
# if for some reason the protein sequence is shorter than 100 characters
if len(charList) < 100:
for i in range(len(charList)):
seq.append(ord(charList[i]) - ord('A') + 1)
if seq[len(seq)-1] > 20:
seq[len(seq)-1] = 20
label = seq[len(seq)-1]
while len(seq) < 100:
seq.append(0)
# grab the labels and convert seq into a one hot encoding of 21
seq = tf.one_hot(seq,21)
test.append(seq[:len(seq)-1])
testlabel.append(label)
seq = []
else:
for i in range(100): # convert each letter into an int
seq.append(ord(charList[i]) - ord('A') + 1)
# grab the labels and convert seq into a one hot encoding of 21
if seq[len(seq)-1] > 20:
seq[len(seq)-1] = 20
label = seq[len(seq)-1]
seq = tf.one_hot(seq,21)
test.append(seq[:len(seq)-1])
testlabel.append(label)
seq = []
count += 1
#### The function for reading and parsing file
def read_seq(sequence):
f = open(sequence, 'r')
train = []
trainlabel = []
# Reading file and extracting paths and labels
with open(sequence, 'r') as File:
infoFile = File.readlines() # Reading all the lines from File
count = 0
for line in infoFile: #Reading line-by-line
if count == 44011:
return train, trainlabel
# training data addin
if count % 5 != 0: # training data found
temp = line.split()
charList = list(temp[0])
seq = []
# if for some reason the protein sequence is shorter than 100 characters
if len(charList) < 100:
for i in range(len(charList)):
seq.append(ord(charList[i]) - ord('A') + 1)
while len(seq) < 100:
seq.append(0)
# grab the labels and convert seq into a one hot encoding of 21
if seq[len(seq)-1] > 20:
seq[len(seq)-1] = 20
label = seq[len(seq)-1]
seq = tf.one_hot(seq,21)
train.append(seq[:len(seq)-1])
trainlabel.append(label)
seq = []
else:
for i in range(100): # convert each letter into an int
seq.append(ord(charList[i]) - ord('A') + 1)
# grab the labels and convert seq into a one hot encoding of 21
if seq[len(seq)-1] > 20:
seq[len(seq)-1] = 20
label = seq[len(seq)-1]
seq = tf.one_hot(seq,21)
train.append(seq[:len(seq)-1])
trainlabel.append(label)
seq = []
count += 1
# We parse files to get training data
seq_train, train_label = read_seq('/content/gdrive/My Drive/pdb_seqres.txt')
seq_test, test_label = read_seqV2('/content/gdrive/My Drive/pdb_seqres.txt')
# We reshape labels to be 2d arrays
train_label = np.asarray(train_label).astype('float32').reshape((-1,1))
test_label = np.asarray(test_label).astype('float32').reshape((-1,1))
# We convert labels (y_train and y_test) to tensors
train_label = ops.convert_to_tensor(train_label, dtype=tf.float32)
test_label = ops.convert_to_tensor(test_label, dtype=tf.float32)
# We make the training and testing tensors floats instead of ints
seq_train = ops.convert_to_tensor(seq_train, dtype=tf.float32)
seq_test = ops.convert_to_tensor(seq_test, dtype=tf.float32)
# We create our recurrent neural network (RNN)
model = keras.Sequential(
[
layers.LSTM(64, return_sequences=True, activation='relu'), # LSTM layer
layers.BatchNormalization(),
layers.Dropout(0.5),
layers.LSTM(128, return_sequences=True, kernel_initializer='glorot_uniform', activation='tanh', bias_initializer='zeros'), # This is the LSTM layer with weights initialized
layers.BatchNormalization(),
layers.Dropout(0.6),
layers.LSTM(64, return_sequences=False, activation='tanh'), # LSTM layer
layers.BatchNormalization(),
layers.Dropout(0.7),
layers.Dense(21, activation='softmax') # Output layer
]
)
"""
For extra credit portion - testing
lr_schedule = keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate=1e-2,
decay_steps=1000,
decay_rate=0.9)
optimizer = keras.optimizers.SGD(learning_rate=lr_schedule)
Optimizer.apply_gradients(
grads_and_vars, name=None, experimental_aggregate_gradients=True
)
"""
# We compile the model
model.compile(loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
# We get the history of the model to plot stuff
historyMod = model.fit(x=seq_train, y=train_label, epochs=25, batch_size=128, validation_data=(seq_test, test_label))
# save the model
#model.save_weights('/content/gdrive/My Drive/saved_model-35E_BIG+_BS128')
# We print out a summary of our model
model.summary()
# We plot results of the training model
plt.plot(historyMod.history['accuracy'], label='Training data')
plt.plot(historyMod.history['val_accuracy'], label='Validation data')
plt.title('Accuracy training vs. Accuracy validation')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt | FlipChars | identifier_name |
ProteinRNN.py | .append(0)
# grab the labels and convert seq into a one hot encoding of 21
seq = tf.one_hot(seq,21)
test.append(seq[:len(seq)-1])
testlabel.append(label)
seq = []
else:
for i in range(100): # convert each letter into an int
seq.append(ord(charList[i]) - ord('A') + 1)
# grab the labels and convert seq into a one hot encoding of 21
if seq[len(seq)-1] > 20:
seq[len(seq)-1] = 20
label = seq[len(seq)-1]
seq = tf.one_hot(seq,21)
test.append(seq[:len(seq)-1])
testlabel.append(label)
seq = []
count += 1
#### The function for reading and parsing file
def read_seq(sequence):
f = open(sequence, 'r')
train = []
trainlabel = []
# Reading file and extracting paths and labels
with open(sequence, 'r') as File:
infoFile = File.readlines() # Reading all the lines from File
count = 0
for line in infoFile: #Reading line-by-line
if count == 44011:
return train, trainlabel
# training data addin
if count % 5 != 0: # training data found
temp = line.split()
charList = list(temp[0])
seq = []
# if for some reason the protein sequence is shorter than 100 characters
if len(charList) < 100:
for i in range(len(charList)):
seq.append(ord(charList[i]) - ord('A') + 1)
while len(seq) < 100:
seq.append(0)
# grab the labels and convert seq into a one hot encoding of 21
if seq[len(seq)-1] > 20:
seq[len(seq)-1] = 20
label = seq[len(seq)-1]
seq = tf.one_hot(seq,21)
train.append(seq[:len(seq)-1])
trainlabel.append(label)
seq = []
else:
for i in range(100): # convert each letter into an int
seq.append(ord(charList[i]) - ord('A') + 1)
# grab the labels and convert seq into a one hot encoding of 21
if seq[len(seq)-1] > 20:
seq[len(seq)-1] = 20
label = seq[len(seq)-1]
seq = tf.one_hot(seq,21)
train.append(seq[:len(seq)-1])
trainlabel.append(label)
seq = []
count += 1
# We parse files to get training data
seq_train, train_label = read_seq('/content/gdrive/My Drive/pdb_seqres.txt')
seq_test, test_label = read_seqV2('/content/gdrive/My Drive/pdb_seqres.txt')
# We reshape labels to be 2d arrays
train_label = np.asarray(train_label).astype('float32').reshape((-1,1))
test_label = np.asarray(test_label).astype('float32').reshape((-1,1))
# We convert labels (y_train and y_test) to tensors
train_label = ops.convert_to_tensor(train_label, dtype=tf.float32)
test_label = ops.convert_to_tensor(test_label, dtype=tf.float32)
# We make the training and testing tensors floats instead of ints
seq_train = ops.convert_to_tensor(seq_train, dtype=tf.float32)
seq_test = ops.convert_to_tensor(seq_test, dtype=tf.float32)
# We create our recurrent neural network (RNN)
model = keras.Sequential(
[
layers.LSTM(64, return_sequences=True, activation='relu'), # LSTM layer
layers.BatchNormalization(),
layers.Dropout(0.5),
layers.LSTM(128, return_sequences=True, kernel_initializer='glorot_uniform', activation='tanh', bias_initializer='zeros'), # This is the LSTM layer with weights initialized
layers.BatchNormalization(),
layers.Dropout(0.6),
layers.LSTM(64, return_sequences=False, activation='tanh'), # LSTM layer
layers.BatchNormalization(),
layers.Dropout(0.7),
layers.Dense(21, activation='softmax') # Output layer
]
)
"""
For extra credit portion - testing
lr_schedule = keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate=1e-2,
decay_steps=1000,
decay_rate=0.9)
optimizer = keras.optimizers.SGD(learning_rate=lr_schedule)
Optimizer.apply_gradients(
grads_and_vars, name=None, experimental_aggregate_gradients=True
)
"""
# We compile the model
model.compile(loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
# We get the history of the model to plot stuff
historyMod = model.fit(x=seq_train, y=train_label, epochs=25, batch_size=128, validation_data=(seq_test, test_label))
# save the model
#model.save_weights('/content/gdrive/My Drive/saved_model-35E_BIG+_BS128')
# We print out a summary of our model
model.summary()
# We plot results of the training model
plt.plot(historyMod.history['accuracy'], label='Training data')
plt.plot(historyMod.history['val_accuracy'], label='Validation data')
plt.title('Accuracy training vs. Accuracy validation')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(loc="upper left")
plt.show()
# We plot the loss
plt.plot(historyMod.history['loss'], label='Training data')
plt.plot(historyMod.history['val_loss'], label='Validation data')
plt.title('Loss training vs. Loss validation')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(loc="upper left")
plt.show()
# This portion takes care of the perplexity plotting
perplexity = tf.exp(historyMod.history['loss'])
plt.plot(perplexity)
plt.title('Perplexity of RNN')
plt.show()
############################################################################
########################### TASK III #####################################
# proteins for trial
protein_seq = "MVLSEGEWQLVLHVWAKVEADVAGHGQDILIRAEKLFNQDVDAAVRGILR"
protein_seq2 = "MPPYTVVYFPVRGRCAALRMLLADQGQSWKEEVVTVETWQEGSLKASCLY"
protein_seq3 = "KVFERCELARTLKRLGMDGYRGISLANWMCLAKWESGYNTRATNYNAGDR"
protein_seq4 = "FNASSGDSKKIVGVFYKANEYATKNPNFLGCVENALGIRDWLESQGHQYI"
protein_seq5 = "MDSEVQRDGRILDLIDDAWREDKLPYEDVAIPLNELPEPEQDNGGTTESV"
# protein to get vocabulary
example_protein = "MNIFEMLRIDEGLRLKIYKDTEGYYTIGIGHLLTKSPSLNAAKSELDKAIGRNTNGVITKDEAEKLFNQDVDAAVRGILRNAKLKPVYDSLDTVRRAALINMVFQMGETGVAGFTNSLRMLQQKRWDEAAVNLAKSRWYNQTPNRAKRVITTFRTGTWDAYKNL"
# getting the vocabulary of the protein sequence as well as their associated IDs
vocab = sorted(set(example_protein))
ids_from_chars = preprocessing.StringLookup(vocabulary=list(vocab), mask_token=None)
chars_from_ids = tf.keras.layers.experimental.preprocessing.StringLookup(vocabulary=ids_from_chars.get_vocabulary(), invert=True, mask_token=None)
# get the one step modelclass initialized so prediction can be performed
one_step_model = OneStep(model, chars_from_ids, ids_from_chars)
# preparing trials
trials = 1
k = 1
i = 0
array_of_proteins = []
array_of_proteins.append(protein_seq)
array_of_proteins.append(protein_seq2)
array_of_proteins.append(protein_seq3)
array_of_proteins.append(protein_seq4)
array_of_proteins.append(protein_seq5)
#array_of_proteins = np.array(array_of_proteins)
# beginning trials
while trials < 6:
print("\nBeginning trial " + str(trials))
print("===============================================================")
print("===============================================================\n")
ar = array_of_proteins[i]
while k != 20:
chars = ar[:k]
next_char = tf.constant([chars])
result = []
result.append(chars)
next_letter = []
for n in range(50-k):
next_letter = one_step_model.generate_one_step(next_char)
next_letter_np = next_letter.numpy()
result.append(next_letter_np[0])
print("When k = " + str(k))
print("-"*len(result))
#k += 1
print("\n-----------Finding matches-----------\n")
print("Prediction with seed of " + str(k))
matches = 0
checkMatches = ar[k:]
k += 1
for x in range(len(checkMatches)):
if checkMatches[x].encode("utf-8") == result[x]:
matches += 1
else:
| continue | conditional_block |
|
ProteinRNN.py |
input_ids = tf.one_hot(input_ids,21)
input_ids = ops.convert_to_tensor(input_ids, dtype=tf.float32)
# Run the model.
predicted_logits = self.model(inputs=input_ids)
# Only use the last prediction.
predicted_logits = predicted_logits / self.temperature
# Apply the prediction mask: prevent "[UNK]" from being generated.
predicted_logits = predicted_logits + self.prediction_mask
# Sample the output logits to generate token IDs.
predicted_ids = tf.random.categorical(predicted_logits, num_samples=1)
# Convert from token ids to characters
predicted_chars = self.chars_from_ids(predicted_ids)
# Return the characters.
return predicted_chars
##### end of class #####
| # line is the temp[0] pass as an argument here
def FlipChars(line):
testlabel = []
test = []
temp = line.split()
charList = list(temp[0])
seq = []
for i in range(50): # convert each letter into an int, randomly flip a char or more
# these 3 if conditions randomly flip a character to test for dependency; comment out any of them for experimentation
if i == 15:
seq.append(ord(string.ascii_uppercase[random.randint(0,25)]) - ord('A') + 1)
continue
if i == 25:
seq.append(ord(string.ascii_uppercase[random.randint(0,25)]) - ord('A') + 1)
continue
if i >= 45:
seq.append(ord(string.ascii_uppercase[random.randint(0,25)]) - ord('A') + 1)
continue
seq.append(ord(charList[i]) - ord('A') + 1)
# grab the labels and convert seq into a one hot encoding of 21
if seq[len(seq)-1] > 20:
seq[len(seq)-1] = 20
# get one hot tensor and test label
label = seq[len(seq)-1]
seq = tf.one_hot(seq,21)
test.append(seq[:len(seq)-1])
testlabel.append(label)
# convert labels to tensors and one hot to a tensor again
test_label = np.asarray(testlabel).astype('float32').reshape((-1,1))
test_label = ops.convert_to_tensor(test_label, dtype=tf.float32)
test = ops.convert_to_tensor(seq, dtype=tf.float32)
return test, test_label
### The function for gathering tests
def read_seqV2(sequence):
f = open(sequence, 'r')
test = []
testlabel = []
# Reading file and extracting paths and labels
with open(sequence, 'r') as File:
infoFile = File.readlines() # Reading all the lines from File
count = 0
for line in infoFile: #Reading line-by-line
if count == 44011:
return test, testlabel
# get testing data
if count % 5 == 0: # every 5th sequence goes into the test set (x_test) with its label in y_test
temp = line.split()
charList = list(temp[0])
seq = []
# if for some reason the protein sequence is shorter than 100 characters
if len(charList) < 100:
for i in range(len(charList)):
seq.append(ord(charList[i]) - ord('A') + 1)
if seq[len(seq)-1] > 20:
seq[len(seq)-1] = 20
label = seq[len(seq)-1]
while len(seq) < 100:
seq.append(0)
# grab the labels and convert seq into a one hot encoding of 21
seq = tf.one_hot(seq,21)
test.append(seq[:len(seq)-1])
testlabel.append(label)
seq = []
else:
for i in range(100): # convert each letter into an int
seq.append(ord(charList[i]) - ord('A') + 1)
# grab the labels and convert seq into a one hot encoding of 21
if seq[len(seq)-1] > 20:
seq[len(seq)-1] = 20
label = seq[len(seq)-1]
seq = tf.one_hot(seq,21)
test.append(seq[:len(seq)-1])
testlabel.append(label)
seq = []
count += 1
#### The function for reading and parsing file
def read_seq(sequence):
f = open(sequence, 'r')
train = []
trainlabel = []
# Reading file and extracting paths and labels
with open(sequence, 'r') as File:
infoFile = File.readlines() # Reading all the lines from File
count = 0
for line in infoFile: #Reading line-by-line
if count == 44011:
return train, trainlabel
# training data addin
if count % 5 != 0: # training data found
temp = line.split()
charList = list(temp[0])
seq = []
# if for some reason the protein sequence is shorter than 100 characters
if len(charList) < 100:
for i in range(len(charList)):
seq.append(ord(charList[i]) - ord('A') + 1)
while len(seq) < 100:
seq.append(0)
# grab the labels and convert seq into a one hot encoding of 21
if seq[len(seq)-1] > 20:
seq[len(seq)-1] = 20
label = seq[len(seq)-1]
seq = tf.one_hot(seq,21)
train.append(seq[:len(seq)-1])
trainlabel.append(label)
seq = []
else:
for i in range(100): # convert each letter into an int
seq.append(ord(charList[i]) - ord('A') + 1)
# grab the labels and convert seq into a one hot encoding of 21
if seq[len(seq)-1] > 20:
seq[len(seq)-1] = 20
label = seq[len(seq)-1]
seq = tf.one_hot(seq,21)
train.append(seq[:len(seq)-1])
trainlabel.append(label)
seq = []
count += 1
# We parse files to get training data
seq_train, train_label = read_seq('/content/gdrive/My Drive/pdb_seqres.txt')
seq_test, test_label = read_seqV2('/content/gdrive/My Drive/pdb_seqres.txt')
# We reshape labels to be 2d arrays
train_label = np.asarray(train_label).astype('float32').reshape((-1,1))
test_label = np.asarray(test_label).astype('float32').reshape((-1,1))
# We convert labels (y_train and y_test) to tensors
train_label = ops.convert_to_tensor(train_label, dtype=tf.float32)
test_label = ops.convert_to_tensor(test_label, dtype=tf.float32)
# We make the training and testing tensors floats instead of ints
seq_train = ops.convert_to_tensor(seq_train, dtype=tf.float32)
seq_test = ops.convert_to_tensor(seq_test, dtype=tf.float32)
# We create our recurrent neural network (RNN)
model = keras.Sequential(
[
layers.LSTM(64, return_sequences=True, activation='relu'), # LSTM layer
layers.BatchNormalization(),
layers.Dropout(0.5),
layers.LSTM(128, return_sequences=True, kernel_initializer='glorot_uniform', activation='tanh', bias_initializer='zeros'), # This is the LSTM layer with weights initialized
layers.BatchNormalization(),
layers.Dropout(0.6),
layers.LSTM(64, return_sequences=False, activation='tanh'), # LSTM layer
layers.BatchNormalization(),
layers.Dropout(0.7),
layers.Dense(21, activation='softmax') # Output layer
]
)
"""
For extra credit portion - testing
lr_schedule = keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate=1e-2,
decay_steps=1000,
decay_rate=0.9)
optimizer = keras.optimizers.SGD(learning_rate=lr_schedule)
Optimizer.apply_gradients(
grads_and_vars, name=None, experimental_aggregate_gradients=True
)
"""
# We compile the model
model.compile(loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
# We get the history of the model to plot stuff
historyMod = model.fit(x=seq_train, y=train_label, epochs=25, batch_size=128, validation_data=(seq_test, test_label))
# save the model
#model.save_weights('/content/gdrive/My Drive/saved_model-35E_BIG+_BS128')
# We print out a summary of our model
model.summary()
# We plot results of the training model
plt.plot(historyMod.history['accuracy'], label='Training data')
plt.plot(historyMod.history['val_accuracy'], label='Validation data')
plt.title('Accuracy training vs. Accuracy validation')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend |
# convert made up sequence to a tensor | random_line_split |
accounts.go | callGasLimit uint64
}
func New(chain *chain.Chain, stateCreator *state.Creator, callGasLimit uint64) *Accounts {
return &Accounts{
chain,
stateCreator,
callGasLimit,
}
}
func (a *Accounts) getCode(addr polo.Address, stateRoot polo.Bytes32) ([]byte, error) {
state, err := a.stateCreator.NewState(stateRoot)
if err != nil {
return nil, err
}
code := state.GetCode(addr)
if err := state.Err(); err != nil {
return nil, err
}
return code, nil
}
func (a *Accounts) handleGetCode(w http.ResponseWriter, req *http.Request) error {
hexAddr := mux.Vars(req)["address"]
addr, err := polo.ParseAddress(hexAddr)
if err != nil {
return utils.BadRequest(errors.WithMessage(err, "address"))
}
h, err := a.handleRevision(req.URL.Query().Get("revision"))
if err != nil {
return err
}
code, err := a.getCode(addr, h.StateRoot())
if err != nil {
return err
}
return utils.WriteTo(w, req, map[string]string{"code": hexutil.Encode(code)})
}
func (a *Accounts) getAccount(addr polo.Address, header *block.Header) (*Account, error) {
state, err := a.stateCreator.NewState(header.StateRoot())
if err != nil {
return nil, err
}
b := state.GetBalance(addr)
code := state.GetCode(addr)
if err := state.Err(); err != nil {
return nil, err
}
return &Account{
Balance: math.HexOrDecimal256(*b),
HasCode: len(code) != 0,
}, nil
}
func (a *Accounts) getStorage(addr polo.Address, key polo.Bytes32, stateRoot polo.Bytes32) (polo.Bytes32, error) {
state, err := a.stateCreator.NewState(stateRoot)
if err != nil {
return polo.Bytes32{}, err
}
storage := state.GetStorage(addr, key)
if err := state.Err(); err != nil {
return polo.Bytes32{}, err
}
return storage, nil
}
func (a *Accounts) handleGetAccount(w http.ResponseWriter, req *http.Request) error {
addr, err := polo.ParseAddress(mux.Vars(req)["address"])
if err != nil {
return utils.BadRequest(errors.WithMessage(err, "address"))
}
h, err := a.handleRevision(req.URL.Query().Get("revision"))
if err != nil {
return err
}
tokenAddress := req.URL.Query().Get("tokenAddress")
if len(tokenAddress) > 0 {
//fetch the ERC20 token balance
contractAddress, err := polo.ParseAddress(tokenAddress)
if err != nil {
return utils.BadRequest(errors.WithMessage(err, "address"))
}
callData := &CallData{}
hexStr := balanceSig
data := poloclient.FromHex(hexStr)
bytesAddr := addr.Bytes()
for i := 0; i < 32 - len(bytesAddr); i++ {
data = append(data, 0)
}
for _, v := range bytesAddr {
data = append(data, byte(v))
}
var batchCallData = &BatchCallData{
Clauses: Clauses{
Clause{
To: &contractAddress,
Value: callData.Value,
Data: hexutil.Encode(data),
},
},
Gas: callData.Gas,
GasPrice: callData.GasPrice,
Caller: callData.Caller,
}
results, err := a.batchCall(req.Context(), batchCallData, h)
if err != nil {
return err
}
decodeData, err := hexutil.Decode(results[0].Data)
if err != nil {
return err
}
if len(decodeData) > 32 {
return fmt.Errorf("decodeData error")
}
b := big.NewInt(0)
b.SetBytes(decodeData)
acc := &Account{
Balance: math.HexOrDecimal256(*b),
HasCode: false,
}
return utils.WriteTo(w, req, acc)
} else {
acc, err := a.getAccount(addr, h)
if err != nil {
return err
}
return utils.WriteTo(w, req, acc)
}
}
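// handleGetAccount above builds an eth_call-style payload by appending the
// queried address, left-padded to 32 bytes, after the balanceOf(address)
// selector. A standalone sketch of that encoding; the selector constant here is
// the standard ERC-20 one (the file's balanceSig value is not shown), and the
// address in main is a dummy value.
package main

import (
	"encoding/hex"
	"fmt"
)

func balanceOfCalldata(addr [20]byte) []byte {
	selector, _ := hex.DecodeString("70a08231") // balanceOf(address)
	data := make([]byte, 0, 4+32)
	data = append(data, selector...)
	data = append(data, make([]byte, 12)...) // 12 zero bytes of left padding
	data = append(data, addr[:]...)          // then the 20-byte address
	return data
}

func main() {
	var addr [20]byte
	addr[19] = 0x01
	fmt.Println(hex.EncodeToString(balanceOfCalldata(addr)))
}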
func (a *Accounts) handleGetStorage(w http.ResponseWriter, req *http.Request) error {
addr, err := polo.ParseAddress(mux.Vars(req)["address"])
if err != nil {
return utils.BadRequest(errors.WithMessage(err, "address"))
}
key, err := polo.ParseBytes32(mux.Vars(req)["key"])
if err != nil {
return utils.BadRequest(errors.WithMessage(err, "key"))
}
h, err := a.handleRevision(req.URL.Query().Get("revision"))
if err != nil {
return err
}
storage, err := a.getStorage(addr, key, h.StateRoot())
if err != nil {
return err
}
return utils.WriteTo(w, req, map[string]string{"value": storage.String()})
}
func (a *Accounts) handleCallContract(w http.ResponseWriter, req *http.Request) error {
callData := &CallData{}
if err := utils.ParseJSON(req.Body, &callData); err != nil {
return utils.BadRequest(errors.WithMessage(err, "body"))
}
h, err := a.handleRevision(req.URL.Query().Get("revision"))
if err != nil {
return err
}
var addr *polo.Address
if mux.Vars(req)["address"] != "" {
address, err := polo.ParseAddress(mux.Vars(req)["address"])
if err != nil {
return utils.BadRequest(errors.WithMessage(err, "address"))
}
addr = &address
}
var batchCallData = &BatchCallData{
Clauses: Clauses{
Clause{
To: addr,
Value: callData.Value,
Data: callData.Data,
},
},
Gas: callData.Gas,
GasPrice: callData.GasPrice,
Caller: callData.Caller,
}
results, err := a.batchCall(req.Context(), batchCallData, h)
if err != nil {
return err
}
return utils.WriteJSON(w, results[0])
}
func (a *Accounts) handleCallBatchCode(w http.ResponseWriter, req *http.Request) error {
batchCallData := &BatchCallData{}
if err := utils.ParseJSON(req.Body, &batchCallData); err != nil {
return utils.BadRequest(errors.WithMessage(err, "body"))
}
h, err := a.handleRevision(req.URL.Query().Get("revision"))
if err != nil {
return err
}
results, err := a.batchCall(req.Context(), batchCallData, h)
if err != nil {
return err
}
return utils.WriteJSON(w, results)
}
func (a *Accounts) batchCall(ctx context.Context, batchCallData *BatchCallData, header *block.Header) (results BatchCallResults, err error) {
gas, gasPrice, caller, clauses, err := a.handleBatchCallData(batchCallData)
if err != nil {
return nil, err
}
state, err := a.stateCreator.NewState(header.StateRoot())
if err != nil {
return nil, err
}
signer, _ := header.Signer()
rt := runtime.New(a.chain.NewSeeker(header.ParentID()), state, | Beneficiary: header.Beneficiary(),
Signer: signer,
Number: header.Number(),
Time: header.Timestamp(),
GasLimit: header.GasLimit(),
TotalScore: header.TotalScore()})
results = make(BatchCallResults, 0)
vmout := make(chan *runtime.Output, 1)
for i, clause := range clauses {
exec, interrupt := rt.PrepareClause(clause, uint32(i), gas, &xenv.TransactionContext{
Origin: *caller,
GasPrice: gasPrice})
go func() {
out, _ := exec()
vmout <- out
}()
select {
case <-ctx.Done():
interrupt()
return nil, ctx.Err()
case out := <-vmout:
if err := rt.Seeker().Err(); err != nil {
return nil, err
}
if err := state.Err(); err != nil {
return nil, err
}
results = append(results, convertCallResultWithInputGas(out, gas))
if out.VMErr != nil {
return results, nil
}
gas = out.LeftOverGas
}
}
return results, nil
}
func (a *Accounts) handleBatchCallData(batchCallData *BatchCallData) (gas uint64, gasPrice *big.Int, caller *polo.Address, clauses []*tx.Clause, err error) {
if batchCallData.Gas > a.callGasLimit {
return 0, nil, nil, nil, utils.Forbidden(errors.New("gas: exceeds limit"))
} else if batchCallData.Gas == 0 {
gas = a.callGasLimit
} else {
gas = batchCallData | &xenv.BlockContext{ | random_line_split |
accounts.go | {
return polo.Bytes32{}, err
}
storage := state.GetStorage(addr, key)
if err := state.Err(); err != nil {
return polo.Bytes32{}, err
}
return storage, nil
}
func (a *Accounts) handleGetAccount(w http.ResponseWriter, req *http.Request) error {
addr, err := polo.ParseAddress(mux.Vars(req)["address"])
if err != nil {
return utils.BadRequest(errors.WithMessage(err, "address"))
}
h, err := a.handleRevision(req.URL.Query().Get("revision"))
if err != nil {
return err
}
tokenAddress := req.URL.Query().Get("tokenAddress")
if len(tokenAddress) > 0 {
//fetch the ERC20 token balance
contractAddress, err := polo.ParseAddress(tokenAddress)
if err != nil {
return utils.BadRequest(errors.WithMessage(err, "address"))
}
callData := &CallData{}
hexStr := balanceSig
data := poloclient.FromHex(hexStr)
bytesAddr := addr.Bytes()
for i := 0; i < 32 - len(bytesAddr); i++ {
data = append(data, 0)
}
for _, v := range bytesAddr {
data = append(data, byte(v))
}
var batchCallData = &BatchCallData{
Clauses: Clauses{
Clause{
To: &contractAddress,
Value: callData.Value,
Data: hexutil.Encode(data),
},
},
Gas: callData.Gas,
GasPrice: callData.GasPrice,
Caller: callData.Caller,
}
results, err := a.batchCall(req.Context(), batchCallData, h)
if err != nil {
return err
}
decodeData, err := hexutil.Decode(results[0].Data)
if err != nil {
return err
}
if len(decodeData) > 32 {
return fmt.Errorf("decodeData error")
}
b := big.NewInt(0)
b.SetBytes(decodeData)
acc := &Account{
Balance: math.HexOrDecimal256(*b),
HasCode: false,
}
return utils.WriteTo(w, req, acc)
} else {
acc, err := a.getAccount(addr, h)
if err != nil {
return err
}
return utils.WriteTo(w, req, acc)
}
}
func (a *Accounts) handleGetStorage(w http.ResponseWriter, req *http.Request) error {
addr, err := polo.ParseAddress(mux.Vars(req)["address"])
if err != nil {
return utils.BadRequest(errors.WithMessage(err, "address"))
}
key, err := polo.ParseBytes32(mux.Vars(req)["key"])
if err != nil {
return utils.BadRequest(errors.WithMessage(err, "key"))
}
h, err := a.handleRevision(req.URL.Query().Get("revision"))
if err != nil {
return err
}
storage, err := a.getStorage(addr, key, h.StateRoot())
if err != nil {
return err
}
return utils.WriteTo(w, req, map[string]string{"value": storage.String()})
}
func (a *Accounts) handleCallContract(w http.ResponseWriter, req *http.Request) error {
callData := &CallData{}
if err := utils.ParseJSON(req.Body, &callData); err != nil {
return utils.BadRequest(errors.WithMessage(err, "body"))
}
h, err := a.handleRevision(req.URL.Query().Get("revision"))
if err != nil {
return err
}
var addr *polo.Address
if mux.Vars(req)["address"] != "" {
address, err := polo.ParseAddress(mux.Vars(req)["address"])
if err != nil {
return utils.BadRequest(errors.WithMessage(err, "address"))
}
addr = &address
}
var batchCallData = &BatchCallData{
Clauses: Clauses{
Clause{
To: addr,
Value: callData.Value,
Data: callData.Data,
},
},
Gas: callData.Gas,
GasPrice: callData.GasPrice,
Caller: callData.Caller,
}
results, err := a.batchCall(req.Context(), batchCallData, h)
if err != nil {
return err
}
return utils.WriteJSON(w, results[0])
}
func (a *Accounts) handleCallBatchCode(w http.ResponseWriter, req *http.Request) error {
batchCallData := &BatchCallData{}
if err := utils.ParseJSON(req.Body, &batchCallData); err != nil {
return utils.BadRequest(errors.WithMessage(err, "body"))
}
h, err := a.handleRevision(req.URL.Query().Get("revision"))
if err != nil {
return err
}
results, err := a.batchCall(req.Context(), batchCallData, h)
if err != nil {
return err
}
return utils.WriteJSON(w, results)
}
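// batchCall runs the decoded clauses in order against the state at the given block header, carrying leftover gas forward and stopping early on a VM error or context cancellation.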
func (a *Accounts) batchCall(ctx context.Context, batchCallData *BatchCallData, header *block.Header) (results BatchCallResults, err error) {
gas, gasPrice, caller, clauses, err := a.handleBatchCallData(batchCallData)
if err != nil {
return nil, err
}
state, err := a.stateCreator.NewState(header.StateRoot())
if err != nil {
return nil, err
}
signer, _ := header.Signer()
rt := runtime.New(a.chain.NewSeeker(header.ParentID()), state,
&xenv.BlockContext{
Beneficiary: header.Beneficiary(),
Signer: signer,
Number: header.Number(),
Time: header.Timestamp(),
GasLimit: header.GasLimit(),
TotalScore: header.TotalScore()})
results = make(BatchCallResults, 0)
vmout := make(chan *runtime.Output, 1)
for i, clause := range clauses {
exec, interrupt := rt.PrepareClause(clause, uint32(i), gas, &xenv.TransactionContext{
Origin: *caller,
GasPrice: gasPrice})
go func() {
out, _ := exec()
vmout <- out
}()
select {
case <-ctx.Done():
interrupt()
return nil, ctx.Err()
case out := <-vmout:
if err := rt.Seeker().Err(); err != nil {
return nil, err
}
if err := state.Err(); err != nil {
return nil, err
}
results = append(results, convertCallResultWithInputGas(out, gas))
if out.VMErr != nil {
return results, nil
}
gas = out.LeftOverGas
}
}
return results, nil
}
func (a *Accounts) handleBatchCallData(batchCallData *BatchCallData) (gas uint64, gasPrice *big.Int, caller *polo.Address, clauses []*tx.Clause, err error) {
if batchCallData.Gas > a.callGasLimit {
return 0, nil, nil, nil, utils.Forbidden(errors.New("gas: exceeds limit"))
} else if batchCallData.Gas == 0 {
gas = a.callGasLimit
} else {
gas = batchCallData.Gas
}
if batchCallData.GasPrice == nil {
gasPrice = new(big.Int)
} else {
gasPrice = (*big.Int)(batchCallData.GasPrice)
}
if batchCallData.Caller == nil {
caller = &polo.Address{}
} else {
caller = batchCallData.Caller
}
clauses = make([]*tx.Clause, len(batchCallData.Clauses))
for i, c := range batchCallData.Clauses {
var value *big.Int
if c.Value == nil {
value = new(big.Int)
} else {
value = (*big.Int)(c.Value)
}
var data []byte
if c.Data != "" {
data, err = hexutil.Decode(c.Data)
if err != nil {
err = utils.BadRequest(errors.WithMessage(err, fmt.Sprintf("data[%d]", i)))
return
}
}
clauses[i] = tx.NewClause(c.To).WithData(data).WithValue(value)
}
return
}
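// handleRevision resolves a revision string ("best" or empty, a 32-byte block ID, or a block number) to a block header.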
func (a *Accounts) handleRevision(revision string) (*block.Header, error) {
if | revision == "" || revision == "best" {
return a.chain.BestBlock().Header(), nil
}
if len(revision) == 66 || len(revision) == 64 {
blockID, err := polo.ParseBytes32(revision)
if err != nil {
return nil, utils.BadRequest(errors.WithMessage(err, "revision"))
}
h, err := a.chain.GetBlockHeader(blockID)
if err != nil {
if a.chain.IsNotFound(err) {
return nil, utils.BadRequest(errors.WithMessage(err, "revision"))
}
return nil, err
}
return h, nil
}
n, err := strconv.ParseUint(revision, 0, 0)
if err != nil {
return nil, utils.BadRequest(errors.WithMessage(err, "revision")) | identifier_body |
|
accounts.go | GasLimit uint64
}
func New(chain *chain.Chain, stateCreator *state.Creator, callGasLimit uint64) *Accounts {
return &Accounts{
chain,
stateCreator,
callGasLimit,
}
}
func (a *Accounts) getCode(addr polo.Address, stateRoot polo.Bytes32) ([]byte, error) {
state, err := a.stateCreator.NewState(stateRoot)
if err != nil {
return nil, err
}
code := state.GetCode(addr)
if err := state.Err(); err != nil {
return nil, err
}
return code, nil
}
func (a *Accounts) handleGetCode(w http.ResponseWriter, req *http.Request) error {
hexAddr := mux.Vars(req)["address"]
addr, err := polo.ParseAddress(hexAddr)
if err != nil {
return utils.BadRequest(errors.WithMessage(err, "address"))
}
h, err := a.handleRevision(req.URL.Query().Get("revision"))
if err != nil {
return err
}
code, err := a.getCode(addr, h.StateRoot())
if err != nil {
return err
}
return utils.WriteTo(w, req, map[string]string{"code": hexutil.Encode(code)})
}
func (a *Accounts) getAccount(addr polo.Address, header *block.Header) (*Account, error) {
state, err := a.stateCreator.NewState(header.StateRoot())
if err != nil {
return nil, err
}
b := state.GetBalance(addr)
code := state.GetCode(addr)
if err := state.Err(); err != nil {
return nil, err
}
return &Account{
Balance: math.HexOrDecimal256(*b),
HasCode: len(code) != 0,
}, nil
}
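// getStorage reads a single storage slot of an address from the state at the given root.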
func (a *Accounts) getStorage(addr polo.Address, key polo.Bytes32, stateRoot polo.Bytes32) (polo.Bytes32, error) {
state, err := a.stateCreator.NewState(stateRoot)
if err != nil {
return polo.Bytes32{}, err
}
storage := state.GetStorage(addr, key)
if err := state.Err(); err != nil {
return polo.Bytes32{}, err
}
return storage, nil
}
func (a *Accounts) handleGetAccount(w http.ResponseWriter, req *http.Request) error {
addr, err := polo.ParseAddress(mux.Vars(req)["address"])
if err != nil {
return utils.BadRequest(errors.WithMessage(err, "address"))
}
h, err := a.handleRevision(req.URL.Query().Get("revision"))
if err != nil {
return err
}
tokenAddress := req.URL.Query().Get("tokenAddress")
if len(tokenAddress) > 0 {
// Fetch the ERC20 token balance: call the token contract with balanceSig plus the 32-byte left-padded holder address
contractAddress, err := polo.ParseAddress(tokenAddress)
if err != nil {
return utils.BadRequest(errors.WithMessage(err, "address"))
}
callData := &CallData{}
hexStr := balanceSig
data := poloclient.FromHex(hexStr)
bytesAddr := addr.Bytes()
for i := 0; i < 32 - len(bytesAddr); i++ {
data = append(data, 0)
}
for _, v := range bytesAddr {
data = append(data, byte(v))
}
var batchCallData = &BatchCallData{
Clauses: Clauses{
Clause{
To: &contractAddress,
Value: callData.Value,
Data: hexutil.Encode(data),
},
},
Gas: callData.Gas,
GasPrice: callData.GasPrice,
Caller: callData.Caller,
}
results, err := a.batchCall(req.Context(), batchCallData, h)
if err != nil {
return err
}
decodeData, err := hexutil.Decode(results[0].Data)
if err != nil {
return err
}
if len(decodeData) > 32 {
return fmt.Errorf("decodeData error")
}
b := big.NewInt(0)
b.SetBytes(decodeData)
acc := &Account{
Balance: math.HexOrDecimal256(*b),
HasCode: false,
}
return utils.WriteTo(w, req, acc)
} else {
acc, err := a.getAccount(addr, h)
if err != nil {
return err
}
return utils.WriteTo(w, req, acc)
}
}
func (a *Accounts) handleGetStorage(w http.ResponseWriter, req *http.Request) error {
addr, err := polo.ParseAddress(mux.Vars(req)["address"])
if err != nil {
return utils.BadRequest(errors.WithMessage(err, "address"))
}
key, err := polo.ParseBytes32(mux.Vars(req)["key"])
if err != nil {
return utils.BadRequest(errors.WithMessage(err, "key"))
}
h, err := a.handleRevision(req.URL.Query().Get("revision"))
if err != nil {
return err
}
storage, err := a.getStorage(addr, key, h.StateRoot())
if err != nil {
return err
}
return utils.WriteTo(w, req, map[string]string{"value": storage.String()})
}
func (a *Accounts) handleCallContract(w http.ResponseWriter, req *http.Request) error {
callData := &CallData{}
if err := utils.ParseJSON(req.Body, &callData); err != nil {
return utils.BadRequest(errors.WithMessage(err, "body"))
}
h, err := a.handleRevision(req.URL.Query().Get("revision"))
if err != nil {
return err
}
var addr *polo.Address
if mux.Vars(req)["address"] != "" {
address, err := polo.ParseAddress(mux.Vars(req)["address"])
if err != nil {
return utils.BadRequest(errors.WithMessage(err, "address"))
}
addr = &address
}
var batchCallData = &BatchCallData{
Clauses: Clauses{
Clause{
To: addr,
Value: callData.Value,
Data: callData.Data,
},
},
Gas: callData.Gas,
GasPrice: callData.GasPrice,
Caller: callData.Caller,
}
results, err := a.batchCall(req.Context(), batchCallData, h)
if err != nil {
return err
}
return utils.WriteJSON(w, results[0])
}
func (a *Accounts) handleCallBatchCode(w http.ResponseWriter, req *http.Request) error {
batchCallData := &BatchCallData{}
if err := utils.ParseJSON(req.Body, &batchCallData); err != nil {
return utils.BadRequest(errors.WithMessage(err, "body"))
}
h, err := a.handleRevision(req.URL.Query().Get("revision"))
if err != nil {
return err
}
results, err := a.batchCall(req.Context(), batchCallData, h)
if err != nil {
return err
}
return utils.WriteJSON(w, results)
}
func (a *Accounts) batchCall(ctx context.Context, batchCallData *BatchCallData, header *block.Header) (results BatchCallResults, err error) {
gas, gasPrice, caller, clauses, err := a.handleBatchCallData(batchCallData)
if err != nil {
return nil, err
}
state, err := a.stateCreator.NewState(header.StateRoot())
if err != nil {
return nil, err
}
signer, _ := header.Signer()
rt := runtime.New(a.chain.NewSeeker(header.ParentID()), state,
&xenv.BlockContext{
Beneficiary: header.Beneficiary(),
Signer: signer,
Number: header.Number(),
Time: header.Timestamp(),
GasLimit: header.GasLimit(),
TotalScore: header.TotalScore()})
results = make(BatchCallResults, 0)
vmout := make(chan *runtime.Output, 1)
for i, clause := range clauses {
exec, interrupt := rt.PrepareClause(clause, uint32(i), gas, &xenv.TransactionContext{
Origin: *caller,
GasPrice: gasPrice})
go func() {
out, _ := exec()
vmout <- out
}()
select {
case <-ctx.Done():
interrupt()
return nil, ctx.Err()
case out := <-vmout:
if err := rt.Seeker().Err(); err != nil {
return nil, err
}
if err := state.Err(); err != nil {
| esults = append(results, convertCallResultWithInputGas(out, gas))
if out.VMErr != nil {
return results, nil
}
gas = out.LeftOverGas
}
}
return results, nil
}
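// handleBatchCallData checks the requested gas against the node's call gas limit, fills in defaults for the gas price and caller, and decodes each clause's value and hex data into tx clauses.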
func (a *Accounts) handleBatchCallData(batchCallData *BatchCallData) (gas uint64, gasPrice *big.Int, caller *polo.Address, clauses []*tx.Clause, err error) {
if batchCallData.Gas > a.callGasLimit {
return 0, nil, nil, nil, utils.Forbidden(errors.New("gas: exceeds limit"))
} else if batchCallData.Gas == 0 {
gas = a.callGasLimit
} else {
gas = batch | return nil, err
}
r | conditional_block |
accounts.go | callGasLimit uint64
}
func New(chain *chain.Chain, stateCreator *state.Creator, callGasLimit uint64) *Accounts {
return &Accounts{
chain,
stateCreator,
callGasLimit,
}
}
func (a *Accounts) getCode(addr polo.Address, stateRoot polo.Bytes32) ([]byte, error) {
state, err := a.stateCreator.NewState(stateRoot)
if err != nil {
return nil, err
}
code := state.GetCode(addr)
if err := state.Err(); err != nil {
return nil, err
}
return code, nil
}
func (a *Accounts) handleGetCode(w http.ResponseWriter, req *http.Request) error {
hexAddr := mux.Vars(req)["address"]
addr, err := polo.ParseAddress(hexAddr)
if err != nil {
return utils.BadRequest(errors.WithMessage(err, "address"))
}
h, err := a.handleRevision(req.URL.Query().Get("revision"))
if err != nil {
return err
}
code, err := a.getCode(addr, h.StateRoot())
if err != nil {
return err
}
return utils.WriteTo(w, req, map[string]string{"code": hexutil.Encode(code)})
}
func (a *Accounts) getAccount(addr polo.Address, header *block.Header) (*Account, error) {
state, err := a.stateCreator.NewState(header.StateRoot())
if err != nil {
return nil, err
}
b := state.GetBalance(addr)
code := state.GetCode(addr)
if err := state.Err(); err != nil {
return nil, err
}
return &Account{
Balance: math.HexOrDecimal256(*b),
HasCode: len(code) != 0,
}, nil
}
func (a *Accounts) getStorage(addr polo.Address, key polo.Bytes32, stateRoot polo.Bytes32) (polo.Bytes32, error) {
state, err := a.stateCreator.NewState(stateRoot)
if err != nil {
return polo.Bytes32{}, err
}
storage := state.GetStorage(addr, key)
if err := state.Err(); err != nil {
return polo.Bytes32{}, err
}
return storage, nil
}
func (a *Accounts) h | w http.ResponseWriter, req *http.Request) error {
addr, err := polo.ParseAddress(mux.Vars(req)["address"])
if err != nil {
return utils.BadRequest(errors.WithMessage(err, "address"))
}
h, err := a.handleRevision(req.URL.Query().Get("revision"))
if err != nil {
return err
}
tokenAddress := req.URL.Query().Get("tokenAddress")
if len(tokenAddress) > 0 {
// Fetch the ERC20 token balance: call the token contract with balanceSig plus the 32-byte left-padded holder address
contractAddress, err := polo.ParseAddress(tokenAddress)
if err != nil {
return utils.BadRequest(errors.WithMessage(err, "address"))
}
callData := &CallData{}
hexStr := balanceSig
data := poloclient.FromHex(hexStr)
bytesAddr := addr.Bytes()
for i := 0; i < 32 - len(bytesAddr); i++ {
data = append(data, 0)
}
for _, v := range bytesAddr {
data = append(data, byte(v))
}
var batchCallData = &BatchCallData{
Clauses: Clauses{
Clause{
To: &contractAddress,
Value: callData.Value,
Data: hexutil.Encode(data),
},
},
Gas: callData.Gas,
GasPrice: callData.GasPrice,
Caller: callData.Caller,
}
results, err := a.batchCall(req.Context(), batchCallData, h)
if err != nil {
return err
}
decodeData, err := hexutil.Decode(results[0].Data)
if err != nil {
return err
}
if len(decodeData) > 32 {
return fmt.Errorf("decodeData error")
}
b := big.NewInt(0)
b.SetBytes(decodeData)
acc := &Account{
Balance: math.HexOrDecimal256(*b),
HasCode: false,
}
return utils.WriteTo(w, req, acc)
} else {
acc, err := a.getAccount(addr, h)
if err != nil {
return err
}
return utils.WriteTo(w, req, acc)
}
}
func (a *Accounts) handleGetStorage(w http.ResponseWriter, req *http.Request) error {
addr, err := polo.ParseAddress(mux.Vars(req)["address"])
if err != nil {
return utils.BadRequest(errors.WithMessage(err, "address"))
}
key, err := polo.ParseBytes32(mux.Vars(req)["key"])
if err != nil {
return utils.BadRequest(errors.WithMessage(err, "key"))
}
h, err := a.handleRevision(req.URL.Query().Get("revision"))
if err != nil {
return err
}
storage, err := a.getStorage(addr, key, h.StateRoot())
if err != nil {
return err
}
return utils.WriteTo(w, req, map[string]string{"value": storage.String()})
}
func (a *Accounts) handleCallContract(w http.ResponseWriter, req *http.Request) error {
callData := &CallData{}
if err := utils.ParseJSON(req.Body, &callData); err != nil {
return utils.BadRequest(errors.WithMessage(err, "body"))
}
h, err := a.handleRevision(req.URL.Query().Get("revision"))
if err != nil {
return err
}
var addr *polo.Address
if mux.Vars(req)["address"] != "" {
address, err := polo.ParseAddress(mux.Vars(req)["address"])
if err != nil {
return utils.BadRequest(errors.WithMessage(err, "address"))
}
addr = &address
}
var batchCallData = &BatchCallData{
Clauses: Clauses{
Clause{
To: addr,
Value: callData.Value,
Data: callData.Data,
},
},
Gas: callData.Gas,
GasPrice: callData.GasPrice,
Caller: callData.Caller,
}
results, err := a.batchCall(req.Context(), batchCallData, h)
if err != nil {
return err
}
return utils.WriteJSON(w, results[0])
}
func (a *Accounts) handleCallBatchCode(w http.ResponseWriter, req *http.Request) error {
batchCallData := &BatchCallData{}
if err := utils.ParseJSON(req.Body, &batchCallData); err != nil {
return utils.BadRequest(errors.WithMessage(err, "body"))
}
h, err := a.handleRevision(req.URL.Query().Get("revision"))
if err != nil {
return err
}
results, err := a.batchCall(req.Context(), batchCallData, h)
if err != nil {
return err
}
return utils.WriteJSON(w, results)
}
func (a *Accounts) batchCall(ctx context.Context, batchCallData *BatchCallData, header *block.Header) (results BatchCallResults, err error) {
gas, gasPrice, caller, clauses, err := a.handleBatchCallData(batchCallData)
if err != nil {
return nil, err
}
state, err := a.stateCreator.NewState(header.StateRoot())
if err != nil {
return nil, err
}
signer, _ := header.Signer()
rt := runtime.New(a.chain.NewSeeker(header.ParentID()), state,
&xenv.BlockContext{
Beneficiary: header.Beneficiary(),
Signer: signer,
Number: header.Number(),
Time: header.Timestamp(),
GasLimit: header.GasLimit(),
TotalScore: header.TotalScore()})
results = make(BatchCallResults, 0)
vmout := make(chan *runtime.Output, 1)
for i, clause := range clauses {
exec, interrupt := rt.PrepareClause(clause, uint32(i), gas, &xenv.TransactionContext{
Origin: *caller,
GasPrice: gasPrice})
go func() {
out, _ := exec()
vmout <- out
}()
select {
case <-ctx.Done():
interrupt()
return nil, ctx.Err()
case out := <-vmout:
if err := rt.Seeker().Err(); err != nil {
return nil, err
}
if err := state.Err(); err != nil {
return nil, err
}
results = append(results, convertCallResultWithInputGas(out, gas))
if out.VMErr != nil {
return results, nil
}
gas = out.LeftOverGas
}
}
return results, nil
}
func (a *Accounts) handleBatchCallData(batchCallData *BatchCallData) (gas uint64, gasPrice *big.Int, caller *polo.Address, clauses []*tx.Clause, err error) {
if batchCallData.Gas > a.callGasLimit {
return 0, nil, nil, nil, utils.Forbidden(errors.New("gas: exceeds limit"))
} else if batchCallData.Gas == 0 {
gas = a.callGasLimit
} else {
gas = batch | andleGetAccount( | identifier_name |
main.rs | >,
}
#[derive(StructOpt, Debug)]
#[structopt(name = "client")]
struct Opt {
/// Address to connect to
#[structopt(long="url", default_value="quic://localhost:4433")]
url: Url,
/// TLS certificate in PEM format
#[structopt(parse(from_os_str), short="c", long="cert", default_value="./certs/cert.pem")]
cert: PathBuf,
/// Accept any TLS certificate from the server even if it is invalid
#[structopt(short="a", long="accept_any")]
accept_any_cert: bool
}
fn main() -> Result<(), Box<dyn std::error::Error>> |
#[tokio::main]
async fn run(options: Opt) -> Result<(), Box<dyn std::error::Error>> {
let path = std::env::current_dir().unwrap();
println!("The current directory is {}", path.display());
tracing::subscriber::set_global_default(
tracing_subscriber::FmtSubscriber::builder()
.with_max_level(Level::INFO)
.finish(),
)
.expect("Failed to configure logging");
// Resolve URL from options
let url = options.url;
let remote = (url.host_str().expect("Failed to get host string from URL"), url.port().unwrap_or(4433))
.to_socket_addrs()?
.next()
.expect("couldn't resolve to an address");
// Create a Bevy app
let mut app = App::build();
let cert = get_cert(&options.cert)?;
app.add_plugin(bounded_planet::networking::client::plugin::Network {
addr: remote,
url,
cert,
accept_any_cert: options.accept_any_cert
});
app.init_resource::<PingResponderState>();
app.add_system(respond_to_pings.system());
app.init_resource::<NetEventLoggerState>();
app.add_system(log_net_events.system());
app.init_resource::<MoveCam>();
app.add_resource(Msaa { samples: 4 });
app.add_default_plugins();
app.add_plugin(CameraBPPlugin::default());
app.add_startup_system(setup_scene.system());
app.add_system_to_stage(stage::EVENT_UPDATE, act_camera_on_window_edge.system());
app.add_system_to_stage(stage::EVENT_UPDATE, act_on_scroll_wheel.system());
app.add_stage_after(stage::EVENT_UPDATE, CAM_CACHE_UPDATE);
app.add_system_to_stage(CAM_CACHE_UPDATE, use_or_update_action_cache.system());
app.add_system(play_every_sound_on_mb1.system());
app.init_resource::<TileReceivedState>();
app.add_system(handle_tile_received.system());
app.init_resource::<RequestTileOnConnectedState>();
app.add_system(request_tile_on_connected.system());
// Run it forever
app.run();
Ok(())
}
/// Fetch certificates to use
fn get_cert(cert_path: &PathBuf) -> Result<quinn::Certificate, Box<dyn std::error::Error>> {
info!("Loading Cert: {:?}", cert_path);
Ok(quinn::Certificate::from_der(&fs::read(cert_path)?)?)
}
#[derive(Default)]
pub struct PingResponderState {
pub event_reader: EventReader<ReceiveEvent>,
}
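/// Replies to each received Ping packet with a Pong echoing the same timestamp.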
fn respond_to_pings(
mut state: ResMut<PingResponderState>,
receiver: ResMut<Events<ReceiveEvent>>,
mut sender: ResMut<Events<SendEvent>>,
) {
for evt in state.event_reader.iter(&receiver) {
if let ReceiveEvent::ReceivedPacket { ref connection, data } = evt {
if let Packet::Ping(Ping { timestamp }) = **data {
sender.send(SendEvent::SendPacket {
connection: *connection,
stream: StreamType::PingPong,
data: Arc::new(Packet::Pong(Pong { timestamp }))
});
info!("Received Ping, sending pong. {:?}", connection);
}
}
}
}
#[derive(Default)]
pub struct TileReceivedState {
pub event_reader: EventReader<ReceiveEvent>,
}
/// When a tile is received from the server, we load it into the scene
fn handle_tile_received(
mut commands: Commands,
asset_server: Res<AssetServer>,
mut state: ResMut<TileReceivedState>,
receiver: ResMut<Events<ReceiveEvent>>,
mut meshes: ResMut<Assets<Mesh>>,
mut textures: ResMut<Assets<Texture>>,
mut materials: ResMut<Assets<StandardMaterial>>
) {
for evt in state.event_reader.iter(&receiver) {
if let ReceiveEvent::ReceivedPacket { connection: ref _connection, data } = evt {
if let Packet::WorldTileData(WorldTileData { mesh_data }) = (**data).clone() {
info!("Loading tile received from server.");
let land_texture_top_handle = asset_server
.load_sync(&mut textures, "content/textures/CoveWorldTop.png")
.expect("Failed to load CoveWorldTop.png");
commands.spawn(PbrComponents {
mesh: meshes.add(Mesh {
primitive_topology: bevy::render::pipeline::PrimitiveTopology::TriangleList,
attributes: vec![
VertexAttribute::position(mesh_data.vertices),
VertexAttribute::normal(mesh_data.normals),
VertexAttribute::uv(mesh_data.uvs),
],
indices: Some(mesh_data.indices),
}),
material: materials.add(StandardMaterial {
albedo_texture: Some(land_texture_top_handle),
shaded: true,
..Default::default()
}),
..Default::default()
});
info!("Finished loading tile.");
}
}
}
}
#[derive(Default)]
struct RequestTileOnConnectedState {
pub event_reader: EventReader<ReceiveEvent>,
}
/// When the client connects to the server, request a tile
fn request_tile_on_connected(
mut state: ResMut<RequestTileOnConnectedState>,
mut sender: ResMut<Events<SendEvent>>,
receiver: ResMut<Events<ReceiveEvent>>
) {
for evt in state.event_reader.iter(&receiver) {
if let ReceiveEvent::Connected(connection, _) = evt {
info!("Requesting tile because connected to server...");
sender.send(SendEvent::SendPacket {
connection: *connection,
stream: StreamType::WorldTileData,
data: Arc::new(Packet::WorldTileDataRequest(WorldTileDataRequest {
//todo(#46): Respect request coordinates (x, y lod)
x: 0,
y: 0,
lod: 0
}))
});
}
}
}
/// set up a simple 3D scene with landscape?
fn setup_scene(
mut commands: Commands,
asset_server: Res<AssetServer>,
mut meshes: ResMut<Assets<Mesh>>,
// mut textures: ResMut<Assets<Texture>>,
mut materials: ResMut<Assets<StandardMaterial>>,
mut sounds: ResMut<Assets<AudioSource>>,
) {
asset_server
.load_sync(&mut sounds, "content/textures/test_sound.mp3")
.expect("Failed to load test_sound.mp3");
// add entities to the world
commands
// cube
.spawn(PbrComponents {
mesh: meshes.add(Mesh::from(shape::Cube { size: 1.0 })),
material: materials.add(Color::rgb(0.5, 0.4, 0.3).into()),
transform: Transform::from_translation(Vec3::new(-20.0, 1.0, -20.0)),
..Default::default()
})
// light
.spawn(LightComponents {
transform: Transform::from_translation(Vec3::new(4.0, 8.0, 4.0)),
light: Light {
color: Color::WHITE,
fov: 90f32,
depth: 0f32..100.0
},
..Default::default()
})
// camera
.spawn(Camera3dComponents {
transform: Transform::from_translation_rotation(
Vec3::new(20.0, 20.0, 20.0),
Quat::from_rotation_ypr(2.7, -0.75, 0.0)
),
..Default::default()
})
.with(CameraBPConfig {
forward_weight: -0.01,
back_weight: 0.01,
left_weight: -0.01,
right_weight: 0.01,
..Default::default()
});
}
/// Pushes camera actions based upon mouse movements near the window edge.
fn act_camera_on_window_edge(
wins: Res<Windows>,
pos: Res<Events<CursorMoved>>,
mut mcam: ResMut<MoveCam>,
) {
if let Some(e) = pos.get_reader().find_latest(&pos, |e| e.id.is_primary()) {
let (mut mouse_x, mut mouse_y) = (e.position.x(), e.position.y());
let window = wins.get(e.id).expect("Couldn't get primary window.");
let (window_x, window_y) = (window.width as f32, window.height as f32);
// map (mouse_x, mouse_y) into [-1, 1]^2
mouse_x /= window_x / 2.0;
mouse_y /= window_y / 2.0;
mouse_x -= 1.0;
mouse_y -= 1.0;
let angle = mouse_x.atan2(mouse_y);
let (ax, ay) = (angle.sin(), angle.cos | {
let opt = Opt::from_args();
run(opt)
} | identifier_body |
main.rs | >,
}
#[derive(StructOpt, Debug)]
#[structopt(name = "client")]
struct Opt {
/// Address to connect to
#[structopt(long="url", default_value="quic://localhost:4433")]
url: Url,
/// TLS certificate in PEM format
#[structopt(parse(from_os_str), short="c", long="cert", default_value="./certs/cert.pem")]
cert: PathBuf,
/// Accept any TLS certificate from the server even if it is invalid
#[structopt(short="a", long="accept_any")]
accept_any_cert: bool
}
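// Parse command-line options, then hand off to the async client runtime.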
fn main() -> Result<(), Box<dyn std::error::Error>> {
let opt = Opt::from_args();
run(opt)
}
#[tokio::main]
async fn run(options: Opt) -> Result<(), Box<dyn std::error::Error>> {
let path = std::env::current_dir().unwrap();
println!("The current directory is {}", path.display());
tracing::subscriber::set_global_default(
tracing_subscriber::FmtSubscriber::builder()
.with_max_level(Level::INFO)
.finish(),
)
.expect("Failed to configure logging");
// Resolve URL from options
let url = options.url;
let remote = (url.host_str().expect("Failed to get host string from URL"), url.port().unwrap_or(4433))
.to_socket_addrs()?
.next()
.expect("couldn't resolve to an address");
// Create a Bevy app
let mut app = App::build();
let cert = get_cert(&options.cert)?;
app.add_plugin(bounded_planet::networking::client::plugin::Network {
addr: remote,
url,
cert,
accept_any_cert: options.accept_any_cert
});
app.init_resource::<PingResponderState>();
app.add_system(respond_to_pings.system());
app.init_resource::<NetEventLoggerState>();
app.add_system(log_net_events.system());
app.init_resource::<MoveCam>();
app.add_resource(Msaa { samples: 4 });
app.add_default_plugins();
app.add_plugin(CameraBPPlugin::default());
app.add_startup_system(setup_scene.system());
app.add_system_to_stage(stage::EVENT_UPDATE, act_camera_on_window_edge.system());
app.add_system_to_stage(stage::EVENT_UPDATE, act_on_scroll_wheel.system());
app.add_stage_after(stage::EVENT_UPDATE, CAM_CACHE_UPDATE);
app.add_system_to_stage(CAM_CACHE_UPDATE, use_or_update_action_cache.system());
app.add_system(play_every_sound_on_mb1.system());
app.init_resource::<TileReceivedState>();
app.add_system(handle_tile_received.system());
app.init_resource::<RequestTileOnConnectedState>();
app.add_system(request_tile_on_connected.system());
// Run it forever
app.run();
Ok(())
}
/// Fetch certificates to use
fn get_cert(cert_path: &PathBuf) -> Result<quinn::Certificate, Box<dyn std::error::Error>> {
info!("Loading Cert: {:?}", cert_path);
Ok(quinn::Certificate::from_der(&fs::read(cert_path)?)?)
}
#[derive(Default)]
pub struct PingResponderState {
pub event_reader: EventReader<ReceiveEvent>,
}
fn respond_to_pings(
mut state: ResMut<PingResponderState>,
receiver: ResMut<Events<ReceiveEvent>>,
mut sender: ResMut<Events<SendEvent>>,
) {
for evt in state.event_reader.iter(&receiver) {
if let ReceiveEvent::ReceivedPacket { ref connection, data } = evt {
if let Packet::Ping(Ping { timestamp }) = **data {
sender.send(SendEvent::SendPacket {
connection: *connection,
stream: StreamType::PingPong,
data: Arc::new(Packet::Pong(Pong { timestamp }))
});
info!("Received Ping, sending pong. {:?}", connection);
}
}
}
}
#[derive(Default)]
pub struct TileReceivedState {
pub event_reader: EventReader<ReceiveEvent>,
}
/// When a tile is received from the server, we load it into the scene
fn handle_tile_received(
mut commands: Commands,
asset_server: Res<AssetServer>,
mut state: ResMut<TileReceivedState>,
receiver: ResMut<Events<ReceiveEvent>>,
mut meshes: ResMut<Assets<Mesh>>,
mut textures: ResMut<Assets<Texture>>,
mut materials: ResMut<Assets<StandardMaterial>>
) {
for evt in state.event_reader.iter(&receiver) {
if let ReceiveEvent::ReceivedPacket { connection: ref _connection, data } = evt {
if let Packet::WorldTileData(WorldTileData { mesh_data }) = (**data).clone() {
info!("Loading tile received from server.");
let land_texture_top_handle = asset_server
.load_sync(&mut textures, "content/textures/CoveWorldTop.png")
.expect("Failed to load CoveWorldTop.png");
commands.spawn(PbrComponents {
mesh: meshes.add(Mesh {
primitive_topology: bevy::render::pipeline::PrimitiveTopology::TriangleList,
attributes: vec![
VertexAttribute::position(mesh_data.vertices),
VertexAttribute::normal(mesh_data.normals),
VertexAttribute::uv(mesh_data.uvs),
],
indices: Some(mesh_data.indices),
}),
material: materials.add(StandardMaterial {
albedo_texture: Some(land_texture_top_handle),
shaded: true,
..Default::default()
}),
..Default::default()
});
info!("Finished loading tile.");
}
}
}
}
#[derive(Default)]
struct RequestTileOnConnectedState {
pub event_reader: EventReader<ReceiveEvent>,
}
/// When the client connects to the server, request a tile
fn | (
mut state: ResMut<RequestTileOnConnectedState>,
mut sender: ResMut<Events<SendEvent>>,
receiver: ResMut<Events<ReceiveEvent>>
) {
for evt in state.event_reader.iter(&receiver) {
if let ReceiveEvent::Connected(connection, _) = evt {
info!("Requesting tile because connected to server...");
sender.send(SendEvent::SendPacket {
connection: *connection,
stream: StreamType::WorldTileData,
data: Arc::new(Packet::WorldTileDataRequest(WorldTileDataRequest {
//todo(#46): Respect request coordinates (x, y lod)
x: 0,
y: 0,
lod: 0
}))
});
}
}
}
/// set up a simple 3D scene with landscape?
fn setup_scene(
mut commands: Commands,
asset_server: Res<AssetServer>,
mut meshes: ResMut<Assets<Mesh>>,
// mut textures: ResMut<Assets<Texture>>,
mut materials: ResMut<Assets<StandardMaterial>>,
mut sounds: ResMut<Assets<AudioSource>>,
) {
asset_server
.load_sync(&mut sounds, "content/textures/test_sound.mp3")
.expect("Failed to load test_sound.mp3");
// add entities to the world
commands
// cube
.spawn(PbrComponents {
mesh: meshes.add(Mesh::from(shape::Cube { size: 1.0 })),
material: materials.add(Color::rgb(0.5, 0.4, 0.3).into()),
transform: Transform::from_translation(Vec3::new(-20.0, 1.0, -20.0)),
..Default::default()
})
// light
.spawn(LightComponents {
transform: Transform::from_translation(Vec3::new(4.0, 8.0, 4.0)),
light: Light {
color: Color::WHITE,
fov: 90f32,
depth: 0f32..100.0
},
..Default::default()
})
// camera
.spawn(Camera3dComponents {
transform: Transform::from_translation_rotation(
Vec3::new(20.0, 20.0, 20.0),
Quat::from_rotation_ypr(2.7, -0.75, 0.0)
),
..Default::default()
})
.with(CameraBPConfig {
forward_weight: -0.01,
back_weight: 0.01,
left_weight: -0.01,
right_weight: 0.01,
..Default::default()
});
}
/// Pushes camera actions based upon mouse movements near the window edge.
fn act_camera_on_window_edge(
wins: Res<Windows>,
pos: Res<Events<CursorMoved>>,
mut mcam: ResMut<MoveCam>,
) {
if let Some(e) = pos.get_reader().find_latest(&pos, |e| e.id.is_primary()) {
let (mut mouse_x, mut mouse_y) = (e.position.x(), e.position.y());
let window = wins.get(e.id).expect("Couldn't get primary window.");
let (window_x, window_y) = (window.width as f32, window.height as f32);
// map (mouse_x, mouse_y) into [-1, 1]^2
mouse_x /= window_x / 2.0;
mouse_y /= window_y / 2.0;
mouse_x -= 1.0;
mouse_y -= 1.0;
let angle = mouse_x.atan2(mouse_y);
let (ax, ay) = (angle.sin(), angle.cos());
| request_tile_on_connected | identifier_name |
main.rs | _value="./certs/cert.pem")]
cert: PathBuf,
/// Accept any TLS certificate from the server even if it is invalid
#[structopt(short="a", long="accept_any")]
accept_any_cert: bool
}
fn main() -> Result<(), Box<dyn std::error::Error>> {
let opt = Opt::from_args();
run(opt)
}
#[tokio::main]
async fn run(options: Opt) -> Result<(), Box<dyn std::error::Error>> {
let path = std::env::current_dir().unwrap();
println!("The current directory is {}", path.display());
tracing::subscriber::set_global_default(
tracing_subscriber::FmtSubscriber::builder()
.with_max_level(Level::INFO)
.finish(),
)
.expect("Failed to configure logging");
// Resolve URL from options
let url = options.url;
let remote = (url.host_str().expect("Failed to get host string from URL"), url.port().unwrap_or(4433))
.to_socket_addrs()?
.next()
.expect("couldn't resolve to an address");
// Create a Bevy app
let mut app = App::build();
let cert = get_cert(&options.cert)?;
app.add_plugin(bounded_planet::networking::client::plugin::Network {
addr: remote,
url,
cert,
accept_any_cert: options.accept_any_cert
});
app.init_resource::<PingResponderState>();
app.add_system(respond_to_pings.system());
app.init_resource::<NetEventLoggerState>();
app.add_system(log_net_events.system());
app.init_resource::<MoveCam>();
app.add_resource(Msaa { samples: 4 });
app.add_default_plugins();
app.add_plugin(CameraBPPlugin::default());
app.add_startup_system(setup_scene.system());
app.add_system_to_stage(stage::EVENT_UPDATE, act_camera_on_window_edge.system());
app.add_system_to_stage(stage::EVENT_UPDATE, act_on_scroll_wheel.system());
app.add_stage_after(stage::EVENT_UPDATE, CAM_CACHE_UPDATE);
app.add_system_to_stage(CAM_CACHE_UPDATE, use_or_update_action_cache.system());
app.add_system(play_every_sound_on_mb1.system());
app.init_resource::<TileReceivedState>();
app.add_system(handle_tile_received.system());
app.init_resource::<RequestTileOnConnectedState>();
app.add_system(request_tile_on_connected.system());
// Run it forever
app.run();
Ok(())
}
/// Fetch certificates to use
fn get_cert(cert_path: &PathBuf) -> Result<quinn::Certificate, Box<dyn std::error::Error>> {
info!("Loading Cert: {:?}", cert_path);
Ok(quinn::Certificate::from_der(&fs::read(cert_path)?)?)
}
#[derive(Default)]
pub struct PingResponderState {
pub event_reader: EventReader<ReceiveEvent>,
}
fn respond_to_pings(
mut state: ResMut<PingResponderState>,
receiver: ResMut<Events<ReceiveEvent>>,
mut sender: ResMut<Events<SendEvent>>,
) {
for evt in state.event_reader.iter(&receiver) {
if let ReceiveEvent::ReceivedPacket { ref connection, data } = evt {
if let Packet::Ping(Ping { timestamp }) = **data {
sender.send(SendEvent::SendPacket {
connection: *connection,
stream: StreamType::PingPong,
data: Arc::new(Packet::Pong(Pong { timestamp }))
});
info!("Received Ping, sending pong. {:?}", connection);
}
}
}
}
#[derive(Default)]
pub struct TileReceivedState {
pub event_reader: EventReader<ReceiveEvent>,
}
/// When a tile is received from the server, we load it into the scene
fn handle_tile_received(
mut commands: Commands,
asset_server: Res<AssetServer>,
mut state: ResMut<TileReceivedState>,
receiver: ResMut<Events<ReceiveEvent>>,
mut meshes: ResMut<Assets<Mesh>>,
mut textures: ResMut<Assets<Texture>>,
mut materials: ResMut<Assets<StandardMaterial>>
) {
for evt in state.event_reader.iter(&receiver) {
if let ReceiveEvent::ReceivedPacket { connection: ref _connection, data } = evt {
if let Packet::WorldTileData(WorldTileData { mesh_data }) = (**data).clone() {
info!("Loading tile received from server.");
let land_texture_top_handle = asset_server
.load_sync(&mut textures, "content/textures/CoveWorldTop.png")
.expect("Failed to load CoveWorldTop.png");
commands.spawn(PbrComponents {
mesh: meshes.add(Mesh {
primitive_topology: bevy::render::pipeline::PrimitiveTopology::TriangleList,
attributes: vec![
VertexAttribute::position(mesh_data.vertices),
VertexAttribute::normal(mesh_data.normals),
VertexAttribute::uv(mesh_data.uvs),
],
indices: Some(mesh_data.indices),
}),
material: materials.add(StandardMaterial {
albedo_texture: Some(land_texture_top_handle),
shaded: true,
..Default::default()
}),
..Default::default()
});
info!("Finished loading tile.");
}
}
}
}
#[derive(Default)]
struct RequestTileOnConnectedState {
pub event_reader: EventReader<ReceiveEvent>,
}
/// When the client connects to the server, request a tile
fn request_tile_on_connected(
mut state: ResMut<RequestTileOnConnectedState>,
mut sender: ResMut<Events<SendEvent>>,
receiver: ResMut<Events<ReceiveEvent>>
) {
for evt in state.event_reader.iter(&receiver) {
if let ReceiveEvent::Connected(connection, _) = evt {
info!("Requesting tile because connected to server...");
sender.send(SendEvent::SendPacket {
connection: *connection,
stream: StreamType::WorldTileData,
data: Arc::new(Packet::WorldTileDataRequest(WorldTileDataRequest {
//todo(#46): Respect request coordinates (x, y lod)
x: 0,
y: 0,
lod: 0
}))
});
}
}
}
/// set up a simple 3D scene with landscape?
fn setup_scene(
mut commands: Commands,
asset_server: Res<AssetServer>,
mut meshes: ResMut<Assets<Mesh>>,
// mut textures: ResMut<Assets<Texture>>,
mut materials: ResMut<Assets<StandardMaterial>>,
mut sounds: ResMut<Assets<AudioSource>>,
) {
asset_server
.load_sync(&mut sounds, "content/textures/test_sound.mp3")
.expect("Failed to load test_sound.mp3");
// add entities to the world
commands
// cube
.spawn(PbrComponents {
mesh: meshes.add(Mesh::from(shape::Cube { size: 1.0 })),
material: materials.add(Color::rgb(0.5, 0.4, 0.3).into()),
transform: Transform::from_translation(Vec3::new(-20.0, 1.0, -20.0)),
..Default::default()
})
// light
.spawn(LightComponents {
transform: Transform::from_translation(Vec3::new(4.0, 8.0, 4.0)),
light: Light {
color: Color::WHITE,
fov: 90f32,
depth: 0f32..100.0
},
..Default::default()
})
// camera
.spawn(Camera3dComponents {
transform: Transform::from_translation_rotation(
Vec3::new(20.0, 20.0, 20.0),
Quat::from_rotation_ypr(2.7, -0.75, 0.0)
),
..Default::default()
})
.with(CameraBPConfig {
forward_weight: -0.01,
back_weight: 0.01,
left_weight: -0.01,
right_weight: 0.01,
..Default::default()
});
}
/// Pushes camera actions based upon mouse movements near the window edge.
fn act_camera_on_window_edge(
wins: Res<Windows>,
pos: Res<Events<CursorMoved>>,
mut mcam: ResMut<MoveCam>,
) {
if let Some(e) = pos.get_reader().find_latest(&pos, |e| e.id.is_primary()) {
let (mut mouse_x, mut mouse_y) = (e.position.x(), e.position.y());
let window = wins.get(e.id).expect("Couldn't get primary window.");
let (window_x, window_y) = (window.width as f32, window.height as f32);
// map (mouse_x, mouse_y) into [-1, 1]^2
mouse_x /= window_x / 2.0;
mouse_y /= window_y / 2.0;
mouse_x -= 1.0;
mouse_y -= 1.0;
let angle = mouse_x.atan2(mouse_y);
let (ax, ay) = (angle.sin(), angle.cos());
let in_rect = (-CURSOR_H_THRESHOLD <= mouse_x && mouse_x <= CURSOR_H_THRESHOLD)
&& (-CURSOR_V_THRESHOLD <= mouse_y && mouse_y <= CURSOR_V_THRESHOLD); |
if !in_rect && ax.is_finite() && ay.is_finite() {
mcam.right = Some(ax);
mcam.forward = Some(ay);
} else { | random_line_split |
|
main.rs | ,
url,
cert,
accept_any_cert: options.accept_any_cert
});
app.init_resource::<PingResponderState>();
app.add_system(respond_to_pings.system());
app.init_resource::<NetEventLoggerState>();
app.add_system(log_net_events.system());
app.init_resource::<MoveCam>();
app.add_resource(Msaa { samples: 4 });
app.add_default_plugins();
app.add_plugin(CameraBPPlugin::default());
app.add_startup_system(setup_scene.system());
app.add_system_to_stage(stage::EVENT_UPDATE, act_camera_on_window_edge.system());
app.add_system_to_stage(stage::EVENT_UPDATE, act_on_scroll_wheel.system());
app.add_stage_after(stage::EVENT_UPDATE, CAM_CACHE_UPDATE);
app.add_system_to_stage(CAM_CACHE_UPDATE, use_or_update_action_cache.system());
app.add_system(play_every_sound_on_mb1.system());
app.init_resource::<TileReceivedState>();
app.add_system(handle_tile_received.system());
app.init_resource::<RequestTileOnConnectedState>();
app.add_system(request_tile_on_connected.system());
// Run it forever
app.run();
Ok(())
}
/// Fetch certificates to use
fn get_cert(cert_path: &PathBuf) -> Result<quinn::Certificate, Box<dyn std::error::Error>> {
info!("Loading Cert: {:?}", cert_path);
Ok(quinn::Certificate::from_der(&fs::read(cert_path)?)?)
}
#[derive(Default)]
pub struct PingResponderState {
pub event_reader: EventReader<ReceiveEvent>,
}
fn respond_to_pings(
mut state: ResMut<PingResponderState>,
receiver: ResMut<Events<ReceiveEvent>>,
mut sender: ResMut<Events<SendEvent>>,
) {
for evt in state.event_reader.iter(&receiver) {
if let ReceiveEvent::ReceivedPacket { ref connection, data } = evt {
if let Packet::Ping(Ping { timestamp }) = **data {
sender.send(SendEvent::SendPacket {
connection: *connection,
stream: StreamType::PingPong,
data: Arc::new(Packet::Pong(Pong { timestamp }))
});
info!("Received Ping, sending pong. {:?}", connection);
}
}
}
}
#[derive(Default)]
pub struct TileReceivedState {
pub event_reader: EventReader<ReceiveEvent>,
}
/// When a tile is received from the server, we load it into the scene
fn handle_tile_received(
mut commands: Commands,
asset_server: Res<AssetServer>,
mut state: ResMut<TileReceivedState>,
receiver: ResMut<Events<ReceiveEvent>>,
mut meshes: ResMut<Assets<Mesh>>,
mut textures: ResMut<Assets<Texture>>,
mut materials: ResMut<Assets<StandardMaterial>>
) {
for evt in state.event_reader.iter(&receiver) {
if let ReceiveEvent::ReceivedPacket { connection: ref _connection, data } = evt {
if let Packet::WorldTileData(WorldTileData { mesh_data }) = (**data).clone() {
info!("Loading tile received from server.");
let land_texture_top_handle = asset_server
.load_sync(&mut textures, "content/textures/CoveWorldTop.png")
.expect("Failed to load CoveWorldTop.png");
commands.spawn(PbrComponents {
mesh: meshes.add(Mesh {
primitive_topology: bevy::render::pipeline::PrimitiveTopology::TriangleList,
attributes: vec![
VertexAttribute::position(mesh_data.vertices),
VertexAttribute::normal(mesh_data.normals),
VertexAttribute::uv(mesh_data.uvs),
],
indices: Some(mesh_data.indices),
}),
material: materials.add(StandardMaterial {
albedo_texture: Some(land_texture_top_handle),
shaded: true,
..Default::default()
}),
..Default::default()
});
info!("Finished loading tile.");
}
}
}
}
#[derive(Default)]
struct RequestTileOnConnectedState {
pub event_reader: EventReader<ReceiveEvent>,
}
/// When the client connects to the server, request a tile
fn request_tile_on_connected(
mut state: ResMut<RequestTileOnConnectedState>,
mut sender: ResMut<Events<SendEvent>>,
receiver: ResMut<Events<ReceiveEvent>>
) {
for evt in state.event_reader.iter(&receiver) {
if let ReceiveEvent::Connected(connection, _) = evt {
info!("Requesting tile because connected to server...");
sender.send(SendEvent::SendPacket {
connection: *connection,
stream: StreamType::WorldTileData,
data: Arc::new(Packet::WorldTileDataRequest(WorldTileDataRequest {
//todo(#46): Respect request coordinates (x, y lod)
x: 0,
y: 0,
lod: 0
}))
});
}
}
}
/// set up a simple 3D scene with landscape?
fn setup_scene(
mut commands: Commands,
asset_server: Res<AssetServer>,
mut meshes: ResMut<Assets<Mesh>>,
// mut textures: ResMut<Assets<Texture>>,
mut materials: ResMut<Assets<StandardMaterial>>,
mut sounds: ResMut<Assets<AudioSource>>,
) {
asset_server
.load_sync(&mut sounds, "content/textures/test_sound.mp3")
.expect("Failed to load test_sound.mp3");
// add entities to the world
commands
// cube
.spawn(PbrComponents {
mesh: meshes.add(Mesh::from(shape::Cube { size: 1.0 })),
material: materials.add(Color::rgb(0.5, 0.4, 0.3).into()),
transform: Transform::from_translation(Vec3::new(-20.0, 1.0, -20.0)),
..Default::default()
})
// light
.spawn(LightComponents {
transform: Transform::from_translation(Vec3::new(4.0, 8.0, 4.0)),
light: Light {
color: Color::WHITE,
fov: 90f32,
depth: 0f32..100.0
},
..Default::default()
})
// camera
.spawn(Camera3dComponents {
transform: Transform::from_translation_rotation(
Vec3::new(20.0, 20.0, 20.0),
Quat::from_rotation_ypr(2.7, -0.75, 0.0)
),
..Default::default()
})
.with(CameraBPConfig {
forward_weight: -0.01,
back_weight: 0.01,
left_weight: -0.01,
right_weight: 0.01,
..Default::default()
});
}
/// Pushes camera actions based upon mouse movements near the window edge.
fn act_camera_on_window_edge(
wins: Res<Windows>,
pos: Res<Events<CursorMoved>>,
mut mcam: ResMut<MoveCam>,
) {
if let Some(e) = pos.get_reader().find_latest(&pos, |e| e.id.is_primary()) {
let (mut mouse_x, mut mouse_y) = (e.position.x(), e.position.y());
let window = wins.get(e.id).expect("Couldn't get primary window.");
let (window_x, window_y) = (window.width as f32, window.height as f32);
// map (mouse_x, mouse_y) into [-1, 1]^2
mouse_x /= window_x / 2.0;
mouse_y /= window_y / 2.0;
mouse_x -= 1.0;
mouse_y -= 1.0;
let angle = mouse_x.atan2(mouse_y);
let (ax, ay) = (angle.sin(), angle.cos());
let in_rect = (-CURSOR_H_THRESHOLD <= mouse_x && mouse_x <= CURSOR_H_THRESHOLD)
&& (-CURSOR_V_THRESHOLD <= mouse_y && mouse_y <= CURSOR_V_THRESHOLD);
if !in_rect && ax.is_finite() && ay.is_finite() {
mcam.right = Some(ax);
mcam.forward = Some(ay);
} else {
mcam.right = None;
mcam.forward = None;
}
}
}
/// Pushes camera actions based upon scroll wheel movement.
fn act_on_scroll_wheel(
mouse_wheel: Res<Events<MouseWheel>>,
mut acts: ResMut<Events<CameraBPAction>>,
) {
for mw in mouse_wheel.get_reader().iter(&mouse_wheel) {
/// If scrolling units are reported in lines rather than pixels,
/// multiply the returned horizontal scrolling amount by this.
const LINE_SIZE: f32 = 14.0;
let w = mw.y.abs()
* if let MouseScrollUnit::Line = mw.unit {
LINE_SIZE
} else {
1.0
};
if mw.y > 0.0 {
acts.send(CameraBPAction::ZoomIn(Some(w)))
} else if mw.y < 0.0 {
acts.send(CameraBPAction::ZoomOut(Some(w)))
}
}
}
/// Depending on `dirty`, either update the local `cache` or fill the event
/// queue for [`CameraBPAction`] with the locally cached copy.
fn use_or_update_action_cache(mcam: Res<MoveCam>, mut acts: ResMut<Events<CameraBPAction>>) {
if let Some(w) = mcam.right | {
acts.send(CameraBPAction::MoveRight(Some(w)))
} | conditional_block |
|
rzline.py | ', '海口市', '广州市', '深圳市',
]
# Query date (today)
quoteDate = time.mktime(time.strptime(Today, '%Y%m%d'))
quoteDate = str(quoteDate).replace('.', '') + '00'
quoteType_dict = {
'e': '电票',
'se': '小电票',
# 's': '纸票',
# 'b': '商票',
}
# Type dictionary
kind_dict = {'gg': '国股', 'sh': '城商', 'sn': '三农',
'busEle': '电子', 'busPaper': '纸质',
'gq': '国企', 'yq': '央企',
'ss': '上市公司', 'my': '民营企业'}
# Business type dictionary
busType_dict = {"1": "买断", "2": "直贴", "0": ""}
# Number of records to request per page
rows = 100
formdata = {
"page": 1,
"city": "",
"rows": rows,
"orderBy": "2",
"quoteType": "",
"detailType": "",
"quoteDate": quoteDate,
"appVersion": "iOS2.6.1",
"ifDefaultCity": "false",
}
formdata1 = {
"quoteDate": "",
"quoteType": "",
"orgUserId": "",
"appVersion": "2.6.1",
}
headers = {
"Content-Length": "82",
"Host": "www.rzline.com",
"Connection": "keep-alive",
"Origin": "http://www.rzline.com",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9",
"X-Requested-With": "XMLHttpRequest",
"Content-Type": "application/json;charset=UTF-8", # must be included
"Accept": "application/json, text/javascript, */*; q=0.01", # must be included
"Referer": "http://www.rzline.com/web/front/quoteMarket/show",
}
# Connect to the database
db = pymysql.connect(host='10.11.2.138', port=3306, user='sunhf', password='sunhf@345')
cur = db.cursor()
def parse(self, response):
base_url = 'http://www.rzline.com/web/mobuser/market/quoteShow'
for city in self.city_list:
for type in self.quoteType_dict:
formdata = copy.deepcopy(self.formdata)
formdata['city'] = city
formdata['quoteType'] = type
if city == '上海市':
formdata['ifDefaultCity'] = 'true'
pass
url = base_url + '&page=' + str(formdata['page'])
yield scrapy.Request(
url=url,
dont_filter=True,
callback=self.parse_id,
errback=self.hand_error,
meta={'data': formdata, 'header': self.headers, 'city': city},
)
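# Walk the quote list returned for one (city, quote type) query and request the detail page of every org that published prices.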
def parse_id(self, response):
flag = 1
city = response.meta['city']
data = response.body.decode()
json_data = json.loads(data)
id_url = 'http://www.rzline.com/web/mobuser/market/quoteDetail'
print('当前页数:{}'.format(response.meta['data']['page']))
print('data=', response.meta['data'])
try:
data_list = json_data['data']
if data_list:
for i in data_list:
user_id = i['orgUserId']
company = i['orgSimpleName']
price_list = i['quotePriceDetailList'] if i['quotePriceDetailList'] else []
if len(price_list):
formdata1 = copy.deepcopy(self.formdata1)
formdata1['orgUserId'] = str(user_id)
formdata1['quoteDate'] = i['quoteDate']
formdata1['quoteType'] = response.meta['data']['quoteType']
url = id_url + '&orgUserId=' + str(user_id) + \
'&quoteType=' + formdata1['quoteType']
print(user_id, ':', company, ':', len(price_list),
':', city, ':', formdata1['quoteType'])
self.logger.info(str(user_id)+':'+company+':'+str(len(price_list))
+ ':' + city + ':' + formdata1['quoteType'])
yield scrapy.Request(url=url,
priority=1,
callback=self.parse_detail,
errback=self.hand_error,
meta={'data': formdata1,
'city': city,
'header': self.headers,
'user_id': user_id,
'quoteType': formdata1['quoteType'],
}
)
else:
flag = 0
except Exception as e:
# Send an email alert
title = '爬虫' + self.name + '异常'
error_info = misc.get_error_info(str(e))
content = '异常位置:' + error_info['pos'] + '\n' + '异常原因:' + error_info['reason']
EmailSender().send(title, content)
raise CloseSpider
# # Next page: not needed when rows is set to 100
# if flag:
# # self.logger.debug('当前页数:{}'.format(response.meta['data']['page']))
# print('当前页数:{}'.format(response.meta['data']['page']))
# print('data=', response.meta['data']) | # formdata['page'] += 1
# base_url = 'http://www.rzline.com/web/mobuser/market/quoteShow'
# url = base_url + '&page=' + str(formdata['page'])
# yield scrapy.Request(
# url=url,
# callback=self.parse_id,
# errback=self.hand_error,
# meta={'data': formdata,
# 'city': city,
# 'header': self.headers},
# )
def parse_detail(self, response):
res = response.body.decode('utf-8')
data = json.loads(res)
data = data['data']
price_list = data.get('quotePriceDetailList', [])
if not price_list:
print('formdata1:', response.meta['data'])
else:
print('city', response.meta['city'])
for price in price_list:
item = RzlineItem()
try:
# Publish time
F2 = price['createTime'] if price['createTime'] else None
F2 = F2.replace('-', '').replace(':', '').replace(' ', '') if F2 else ''
item['F2'] = F2
# Institution
# simple_name = data['orgInfoDto']['orgSimplename'] if data['orgInfoDto'] else ''
whole_name = data['orgInfoDto']['orgWholename'] if data['orgInfoDto'] else ''
item['F3'] = whole_name if whole_name else ''
# Amount
item['F4'] = price['price'] if price['price'] else ''
# Type
detail_type = price['detailType'] if price['detailType'] else ''
quote_btype = price['quoteBType'] if price['quoteBType'] else ''
if detail_type in ['gg', 'sh', 'sn']:
kind = self.kind_dict[detail_type]
elif detail_type in ['busEle', 'busPaper']:
kind = self.kind_dict[quote_btype]
else:
kind = ''
item['F5'] = kind
# Interest per 100,000
F6 = price['tenInterest'].replace('--', '') if 'tenInterest' in price else ''
item['F6'] = F6 + '元' if F6 else ''
# Term
F7 = price['quoteDays'] if price['quoteDays'] else ''
item['F7'] = F7
# Quota
item['F8'] = price['quoteAmount']
# Business type (buyout / direct discount)
item['F9'] = '直贴'
# Contact person
F10 = data['accountManagerList'][0]['name'] if data['accountManagerList'] else ''
item['F10'] = F10
# Contact information
F11 = data['accountManagerList'][0]['mobPhone'] if data['accountManagerList'] else ''
item['F11'] = F11
item['F12'] = self.quoteType_dict[response.meta['quoteType']]
# Original business type
item['F13'] = self.busType_dict[data['busType']] if data['busType'] else ''
# Unique key: date (Y/M/D) + institution + type + amount + term + business type + e-bill
item['F1'] = self._get_uuid(item)
# FT, FV, FP, FU, FS
FS = data['ifStartTrad'] if data['ifStartTrad'] else None
item['FS'] = | # formdata = response.meta['data'] | random_line_split |
rzline.py | '海口市', '广州市', '深圳市',
]
# Query date (today)
quoteDate = time.mktime(time.strptime(Today, '%Y%m%d'))
quoteDate = str(quoteDate).replace('.', '') + '00'
quoteType_dict = {
'e': '电票',
'se': '小电票',
# 's': '纸票',
# 'b': '商票',
}
# Type dictionary
kind_dict = {'gg': '国股', 'sh': '城商', 'sn': '三农',
'busEle': '电子', 'busPaper': '纸质',
'gq': '国企', 'yq': '央企',
'ss': '上市公司', 'my': '民营企业'}
# Business type dictionary
busType_dict = {"1": "买断", "2": "直贴", "0": ""}
# Number of records to request per page
rows = 100
formdata = {
"page": 1,
"city": "",
"rows": rows,
"orderBy": "2",
"quoteType": "",
"detailType": "",
"quoteDate": quoteDate,
"appVersion": "iOS2.6.1",
"ifDefaultCity": "false",
}
formdata1 = {
"quoteDate": "",
"quoteType": "",
"orgUserId": "",
"appVersion": "2.6.1",
}
headers = {
"Content-Length": "82",
"Host": "www.rzline.com",
"Connection": "keep-alive",
"Origin": "http://www.rzline.com",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9",
"X-Requested-With": "XMLHttpRequest",
"Content-Type": "application/json;charset=UTF-8", # must be included
"Accept": "application/json, text/javascript, */*; q=0.01", # must be included
"Referer": "http://www.rzline.com/web/front/quoteMarket/show",
}
# Connect to the database
db = pymysql.connect(host='10.11.2.138', port=3306, user='sunhf', password='sunhf@345')
cur = db.cursor()
def parse(self, response):
base_url = 'http://www.rzline.com/web/mobuser/market/quoteShow'
for city in self.city_list:
for type in self.quoteType_dict:
formdata = copy.deepcopy(self.formdata)
formdata['city'] = city
formdata['quoteType'] = type
if city == '上海市':
formdata['ifDefaultCity'] = 'true'
pass
url = base_url + '&page=' + str(formdata['page'])
yield scrapy.Request(
url=url,
dont_filter=True,
callback=self.parse_id,
errback=self.hand_error,
meta={'data': formdata, 'header': self.headers, 'city': city},
)
def parse_id(self, response):
flag = 1
city = response.meta['city']
data = response.body.decode()
json_data = json.loads(data)
id_url = 'http://www.rzline.com/web/mobuser/market/quoteDetail'
print('当前页数:{}'.format(response.meta['data']['page']))
print('data=', response.meta['data'])
try:
data_list = json_data['data']
| f data_list:
for i in data_list:
user_id = i['orgUserId']
company = i['orgSimpleName']
price_list = i['quotePriceDetailList'] if i['quotePriceDetailList'] else []
if len(price_list):
formdata1 = copy.deepcopy(self.formdata1)
formdata1['orgUserId'] = str(user_id)
formdata1['quoteDate'] = i['quoteDate']
formdata1['quoteType'] = response.meta['data']['quoteType']
url = id_url + '&orgUserId=' + str(user_id) + \
'&quoteType=' + formdata1['quoteType']
print(user_id, ':', company, ':', len(price_list),
':', city, ':', formdata1['quoteType'])
self.logger.info(str(user_id)+':'+company+':'+str(len(price_list))
+ ':' + city + ':' + formdata1['quoteType'])
yield scrapy.Request(url=url,
priority=1,
callback=self.parse_detail,
errback=self.hand_error,
meta={'data': formdata1,
'city': city,
'header': self.headers,
'user_id': user_id,
'quoteType': formdata1['quoteType'],
}
)
else:
flag = 0
except Exception as e:
# Send an email alert
title = '爬虫' + self.name + '异常'
error_info = misc.get_error_info(str(e))
content = '异常位置:' + error_info['pos'] + '\n' + '异常原因:' + error_info['reason']
EmailSender().send(title, content)
raise CloseSpider
# # Next page: not needed when rows is set to 100
# if flag:
# # self.logger.debug('当前页数:{}'.format(response.meta['data']['page']))
# print('当前页数:{}'.format(response.meta['data']['page']))
# print('data=', response.meta['data'])
# formdata = response.meta['data']
# formdata['page'] += 1
# base_url = 'http://www.rzline.com/web/mobuser/market/quoteShow'
# url = base_url + '&page=' + str(formdata['page'])
# yield scrapy.Request(
# url=url,
# callback=self.parse_id,
# errback=self.hand_error,
# meta={'data': formdata,
# 'city': city,
# 'header': self.headers},
# )
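# Convert each price entry of the detail response into a RzlineItem.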
def parse_detail(self, response):
res = response.body.decode('utf-8')
data = json.loads(res)
data = data['data']
price_list = data.get('quotePriceDetailList', [])
if not price_list:
print('formdata1:', response.meta['data'])
else:
print('city', response.meta['city'])
for price in price_list:
item = RzlineItem()
try:
# Publish time
F2 = price['createTime'] if price['createTime'] else None
F2 = F2.replace('-', '').replace(':', '').replace(' ', '') if F2 else ''
item['F2'] = F2
# Institution
# simple_name = data['orgInfoDto']['orgSimplename'] if data['orgInfoDto'] else ''
whole_name = data['orgInfoDto']['orgWholename'] if data['orgInfoDto'] else ''
item['F3'] = whole_name if whole_name else ''
# Amount
item['F4'] = price['price'] if price['price'] else ''
# Type
detail_type = price['detailType'] if price['detailType'] else ''
quote_btype = price['quoteBType'] if price['quoteBType'] else ''
if detail_type in ['gg', 'sh', 'sn']:
kind = self.kind_dict[detail_type]
elif detail_type in ['busEle', 'busPaper']:
kind = self.kind_dict[quote_btype]
else:
kind = ''
item['F5'] = kind
# Interest per 100,000
F6 = price['tenInterest'].replace('--', '') if 'tenInterest' in price else ''
item['F6'] = F6 + '元' if F6 else ''
# Term
F7 = price['quoteDays'] if price['quoteDays'] else ''
item['F7'] = F7
# Quota
item['F8'] = price['quoteAmount']
# Business type (buyout / direct discount)
item['F9'] = '直贴'
# Contact person
F10 = data['accountManagerList'][0]['name'] if data['accountManagerList'] else ''
item['F10'] = F10
# Contact information
F11 = data['accountManagerList'][0]['mobPhone'] if data['accountManagerList'] else ''
item['F11'] = F11
item['F12'] = self.quoteType_dict[response.meta['quoteType']]
# Original business type
item['F13'] = self.busType_dict[data['busType']] if data['busType'] else ''
# Unique key: date (Y/M/D) + institution + type + amount + term + business type + e-bill
item['F1'] = self._get_uuid(item)
# FT, FV, FP, FU, FS
FS = data['ifStartTrad'] if data['ifStartTrad'] else None
item['FS'] = | i | identifier_name |
rzline.py | '海口市', '广州市', '深圳市',
]
# Query date (today)
quoteDate = time.mktime(time.strptime(Today, '%Y%m%d'))
quoteDate = str(quoteDate).replace('.', '') + '00'
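# The two lines above turn today's date into a millisecond-style timestamp string (seconds x 1000) used as the quoteDate field.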
quoteType_dict = {
'e': '电票',
'se': '小电票',
# 's': '纸票',
# 'b': '商票',
}
# Type dictionary
kind_dict = {'gg': '国股', 'sh': '城商', 'sn': '三农',
'busEle': '电子', 'busPaper': '纸质',
'gq': '国企', 'yq': '央企',
'ss': '上市公司', 'my': '民营企业'}
# Business type dictionary
busType_dict = {"1": "买断", "2": "直贴", "0": ""}
# Number of records to request per page
rows = 100
formdata = {
"page": 1,
"city": "",
"rows": rows,
"orderBy": "2",
"quoteType": "",
"detailType": "",
"quoteDate": quoteDate,
"appVersion": "iOS2.6.1",
"ifDefaultCity": "false",
}
formdata1 = {
"quoteDate": "",
"quoteType": "",
"orgUserId": "",
"appVersion": "2.6.1",
}
    headers = {
        "Content-Length": "82",
        "Host": "www.rzline.com",
        "Connection": "keep-alive",
        "Origin": "http://www.rzline.com",
        "Accept-Encoding": "gzip, deflate",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "X-Requested-With": "XMLHttpRequest",
        "Content-Type": "application/json;charset=UTF-8", # required
        "Accept": "application/json, text/javascript, */*; q=0.01", # required
        "Referer": "http://www.rzline.com/web/front/quoteMarket/show",
    }
    # Connect to the database
db = pymysql.connect(host='10.11.2.138', port=3306, user='sunhf', password='sunhf@345')
cur = db.cursor()
def parse(self, response):
base_url = 'http://www.rzline.com/web/mobuser/market/quoteShow'
for city in self.city_list:
for type in self.quoteType_dict:
formdata = copy.deepcopy(self.formdata)
formdata['city'] = city
formdata['quoteType'] = type
if city == '上海市':
formdata['ifDefaultCity'] = 'true'
pass
url = base_url + '&page=' + str(formdata['page'])
yield scrapy.Request(
url=url,
dont_filter=True,
callback=self.parse_id,
errback=self.hand_error,
meta={'data': formdata, 'header': self.headers, 'city': |
flag = 1
city = response.meta['city']
data = response.body.decode()
json_data = json.loads(data)
id_url = 'http://www.rzline.com/web/mobuser/market/quoteDetail'
print('当前页数:{}'.format(response.meta['data']['page']))
print('data=', response.meta['data'])
try:
data_list = json_data['data']
if data_list:
for i in data_list:
user_id = i['orgUserId']
company = i['orgSimpleName']
price_list = i['quotePriceDetailList'] if i['quotePriceDetailList'] else []
if len(price_list):
formdata1 = copy.deepcopy(self.formdata1)
formdata1['orgUserId'] = str(user_id)
formdata1['quoteDate'] = i['quoteDate']
formdata1['quoteType'] = response.meta['data']['quoteType']
                        url = id_url + '&orgUserId=' + str(user_id) + \
                              '&quoteType=' + formdata1['quoteType']
print(user_id, ':', company, ':', len(price_list),
':', city, ':', formdata1['quoteType'])
self.logger.info(str(user_id)+':'+company+':'+str(len(price_list))
+ ':' + city + ':' + formdata1['quoteType'])
yield scrapy.Request(url=url,
priority=1,
callback=self.parse_detail,
errback=self.hand_error,
meta={'data': formdata1,
'city': city,
'header': self.headers,
'user_id': user_id,
'quoteType': formdata1['quoteType'],
}
)
else:
flag = 0
except Exception as e:
            # Send an alert email
title = '爬虫' + self.name + '异常'
error_info = misc.get_error_info(str(e))
content = '异常位置:' + error_info['pos'] + '\n' + '异常原因:' + error_info['reason']
EmailSender().send(title, content)
raise CloseSpider
        # # Next page: not needed when rows is set to 100
# if flag:
# # self.logger.debug('当前页数:{}'.format(response.meta['data']['page']))
# print('当前页数:{}'.format(response.meta['data']['page']))
# print('data=', response.meta['data'])
# formdata = response.meta['data']
# formdata['page'] += 1
# base_url = 'http://www.rzline.com/web/mobuser/market/quoteShow'
# url = base_url + '&page=' + str(formdata['page'])
# yield scrapy.Request(
# url=url,
# callback=self.parse_id,
# errback=self.hand_error,
# meta={'data': formdata,
# 'city': city,
# 'header': self.headers},
# )
def parse_detail(self, response):
res = response.body.decode('utf-8')
data = json.loads(res)
data = data['data']
price_list = data.get('quotePriceDetailList', [])
if not price_list:
print('formdata1:', response.meta['data'])
else:
print('city', response.meta['city'])
for price in price_list:
item = RzlineItem()
try:
                    # Publish time
F2 = price['createTime'] if price['createTime'] else None
F2 = F2.replace('-', '').replace(':', '').replace(' ', '') if F2 else ''
item['F2'] = F2
                    # Institution
# simple_name = data['orgInfoDto']['orgSimplename'] if data['orgInfoDto'] else ''
whole_name = data['orgInfoDto']['orgWholename'] if data['orgInfoDto'] else ''
item['F3'] = whole_name if whole_name else ''
                    # Amount
item['F4'] = price['price'] if price['price'] else ''
                    # Type
detail_type = price['detailType'] if price['detailType'] else ''
quote_btype = price['quoteBType'] if price['quoteBType'] else ''
if detail_type in ['gg', 'sh', 'sn']:
kind = self.kind_dict[detail_type]
elif detail_type in ['busEle', 'busPaper']:
kind = self.kind_dict[quote_btype]
else:
kind = ''
item['F5'] = kind
                    # Interest per 100,000
F6 = price['tenInterest'].replace('--', '') if 'tenInterest' in price else ''
item['F6'] = F6 + '元' if F6 else ''
                    # Term
F7 = price['quoteDays'] if price['quoteDays'] else ''
item['F7'] = F7
                    # Quota
item['F8'] = price['quoteAmount']
                    # Business type (buyout, direct discount)
item['F9'] = '直贴'
                    # Contact person
F10 = data['accountManagerList'][0]['name'] if data['accountManagerList'] else ''
item['F10'] = F10
                    # Contact phone
F11 = data['accountManagerList'][0]['mobPhone'] if data['accountManagerList'] else ''
item['F11'] = F11
item['F12'] = self.quoteType_dict[response.meta['quoteType']]
                    # Original business type
item['F13'] = self.busType_dict[data['busType']] if data['busType'] else ''
                    # Unique key: date (Y/M/D) + institution + type + amount + term + business type + e-bill
item['F1'] = self._get_uuid(item)
# FT, FV, FP, FU, FS
FS = data['ifStartTrad'] if data['ifStartTrad'] else None
item['FS'] = | city},
)
def parse_id(self, response): | conditional_block |
rzline.py | '海口市', '广州市', '深圳市',
]
# 查询日期
quoteDate = time.mktime(time.strptime(Today, '%Y%m%d'))
quoteDate = str(quoteDate).replace('.', '') + '00'
quoteType_dict = {
'e': '电票',
'se': '小电票',
# 's': '纸票',
# 'b': '商票',
}
# 类型字典
kind_dict = {'gg': '国股', 'sh': '城商', 'sn': '三农',
'busEle': '电子', 'busPaper': '纸质',
'gq': '国企', 'yq': '央企',
'ss': '上市公司', 'my': '民营企业'}
# 业务类型字典
busType_dict = {"1": "买断", "2": "直贴", "0": ""}
# 查询条数
rows = 100
formdata = {
"page": 1,
"city": "",
"rows": rows,
"orderBy": "2",
"quoteType": "",
"detailType": "",
"quoteDate": quoteDate,
"appVersion": "iOS2.6.1",
"ifDefaultCity": "false",
}
formdata1 = {
"quoteDate": "",
"quoteType": "",
"orgUserId": "",
"appVersion": "2.6.1",
}
    headers = {
        "Content-Length": "82",
        "Host": "www.rzline.com",
        "Connection": "keep-alive",
        "Origin": "http://www.rzline.com",
        "Accept-Encoding": "gzip, deflate",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "X-Requested-With": "XMLHttpRequest",
        "Content-Type": "application/json;charset=UTF-8", # required
        "Accept": "application/json, text/javascript, */*; q=0.01", # required
        "Referer": "http://www.rzline.com/web/front/quoteMarket/show",
    }
    # Connect to the database
db = pymysql.connect(host='10.11.2.138', port=3306, user='sunhf', password='sunhf@345')
cur = db.cursor()
def parse(self, response):
base_url = 'http://www.rzline.com/web/mobuser/market/quoteShow'
for city in self.city_list:
for type in self.quoteType_dict:
formdata = copy.deepcopy(self.formdata)
formdata['city'] = city
formdata['quoteType'] = type
if city == '上海市':
formdata['ifDefaultCity'] = 'true'
| ta']
if data_list:
for i in data_list:
user_id = i['orgUserId']
company = i['orgSimpleName']
price_list = i['quotePriceDetailList'] if i['quotePriceDetailList'] else []
if len(price_list):
formdata1 = copy.deepcopy(self.formdata1)
formdata1['orgUserId'] = str(user_id)
formdata1['quoteDate'] = i['quoteDate']
formdata1['quoteType'] = response.meta['data']['quoteType']
                        url = id_url + '&orgUserId=' + str(user_id) + \
                              '&quoteType=' + formdata1['quoteType']
print(user_id, ':', company, ':', len(price_list),
':', city, ':', formdata1['quoteType'])
self.logger.info(str(user_id)+':'+company+':'+str(len(price_list))
+ ':' + city + ':' + formdata1['quoteType'])
yield scrapy.Request(url=url,
priority=1,
callback=self.parse_detail,
errback=self.hand_error,
meta={'data': formdata1,
'city': city,
'header': self.headers,
'user_id': user_id,
'quoteType': formdata1['quoteType'],
}
)
else:
flag = 0
except Exception as e:
            # Send an alert email
title = '爬虫' + self.name + '异常'
error_info = misc.get_error_info(str(e))
content = '异常位置:' + error_info['pos'] + '\n' + '异常原因:' + error_info['reason']
EmailSender().send(title, content)
raise CloseSpider
        # # Next page: not needed when rows is set to 100
# if flag:
# # self.logger.debug('当前页数:{}'.format(response.meta['data']['page']))
# print('当前页数:{}'.format(response.meta['data']['page']))
# print('data=', response.meta['data'])
# formdata = response.meta['data']
# formdata['page'] += 1
# base_url = 'http://www.rzline.com/web/mobuser/market/quoteShow'
# url = base_url + '&page=' + str(formdata['page'])
# yield scrapy.Request(
# url=url,
# callback=self.parse_id,
# errback=self.hand_error,
# meta={'data': formdata,
# 'city': city,
# 'header': self.headers},
# )
def parse_detail(self, response):
res = response.body.decode('utf-8')
data = json.loads(res)
data = data['data']
price_list = data.get('quotePriceDetailList', [])
if not price_list:
print('formdata1:', response.meta['data'])
else:
print('city', response.meta['city'])
for price in price_list:
item = RzlineItem()
try:
                    # Publish time
F2 = price['createTime'] if price['createTime'] else None
F2 = F2.replace('-', '').replace(':', '').replace(' ', '') if F2 else ''
item['F2'] = F2
                    # Institution
# simple_name = data['orgInfoDto']['orgSimplename'] if data['orgInfoDto'] else ''
whole_name = data['orgInfoDto']['orgWholename'] if data['orgInfoDto'] else ''
item['F3'] = whole_name if whole_name else ''
                    # Amount
item['F4'] = price['price'] if price['price'] else ''
                    # Type
detail_type = price['detailType'] if price['detailType'] else ''
quote_btype = price['quoteBType'] if price['quoteBType'] else ''
if detail_type in ['gg', 'sh', 'sn']:
kind = self.kind_dict[detail_type]
elif detail_type in ['busEle', 'busPaper']:
kind = self.kind_dict[quote_btype]
else:
kind = ''
item['F5'] = kind
                    # Interest per 100,000
F6 = price['tenInterest'].replace('--', '') if 'tenInterest' in price else ''
item['F6'] = F6 + '元' if F6 else ''
                    # Term
F7 = price['quoteDays'] if price['quoteDays'] else ''
item['F7'] = F7
                    # Quota
item['F8'] = price['quoteAmount']
                    # Business type (buyout, direct discount)
item['F9'] = '直贴'
                    # Contact person
F10 = data['accountManagerList'][0]['name'] if data['accountManagerList'] else ''
item['F10'] = F10
                    # Contact phone
F11 = data['accountManagerList'][0]['mobPhone'] if data['accountManagerList'] else ''
item['F11'] = F11
item['F12'] = self.quoteType_dict[response.meta['quoteType']]
                    # Original business type
item['F13'] = self.busType_dict[data['busType']] if data['busType'] else ''
                    # Unique key: date (Y/M/D) + institution + type + amount + term + business type + e-bill
item['F1'] = self._get_uuid(item)
# FT, FV, FP, FU, FS
FS = data['ifStartTrad'] if data['ifStartTrad'] else None
item['FS'] = | pass
url = base_url + '&page=' + str(formdata['page'])
yield scrapy.Request(
url=url,
dont_filter=True,
callback=self.parse_id,
errback=self.hand_error,
meta={'data': formdata, 'header': self.headers, 'city': city},
)
def parse_id(self, response):
flag = 1
city = response.meta['city']
data = response.body.decode()
json_data = json.loads(data)
id_url = 'http://www.rzline.com/web/mobuser/market/quoteDetail'
print('当前页数:{}'.format(response.meta['data']['page']))
print('data=', response.meta['data'])
try:
data_list = json_data['da | identifier_body |
mod.rs | er) = if fuzzy_terms.is_empty() {
if exact_terms.is_empty() {
(query.into(), QueryType::StartWith, UsageMatcher::default())
} else {
(
exact_terms[0].text.clone(),
QueryType::Exact,
UsageMatcher::new(exact_terms, inverse_terms),
)
}
} else {
(
fuzzy_terms.iter().map(|term| &term.text).join(" "),
QueryType::StartWith,
UsageMatcher::new(exact_terms, inverse_terms),
)
};
// TODO: Search syntax:
// - 'foo
// - foo*
// - foo
//
// if let Some(stripped) = query.strip_suffix('*') {
// (stripped, QueryType::Contain)
// } else if let Some(stripped) = query.strip_prefix('\'') {
// (stripped, QueryType::Exact)
// } else {
// (query, QueryType::StartWith)
// };
QueryInfo {
keyword,
query_type,
usage_matcher,
}
}
#[derive(Debug, Clone, Default)]
struct SearchResults {
/// Last searching results.
///
/// When passing the line content from Vim to Rust, the performance
/// of Vim can become very bad because some lines are extremely long,
/// we cache the last results on Rust to allow passing the line number
/// from Vim later instead.
usages: Usages,
/// Last parsed query info.
query_info: QueryInfo,
}
#[derive(Debug, Clone)]
pub struct DumbJumpProvider {
args: BaseArgs,
/// Results from last searching.
/// This might be a superset of searching results for the last query.
cached_results: SearchResults,
/// Current results from refiltering on `cached_results`.
current_usages: Option<Usages>,
/// Whether the tags file has been (re)-created.
ctags_regenerated: Arc<AtomicBool>,
/// Whether the GTAGS file has been (re)-created.
gtags_regenerated: Arc<AtomicBool>,
}
async fn init_gtags(cwd: PathBuf, gtags_regenerated: Arc<AtomicBool>) {
let gtags_searcher = GtagsSearcher::new(cwd);
match gtags_searcher.create_or_update_tags() {
Ok(()) => gtags_regenerated.store(true, Ordering::SeqCst),
Err(e) => {
tracing::error!(error = ?e, "[dumb_jump] 💔 Error at initializing GTAGS, attempting to recreate...");
// TODO: creating gtags may take 20s+ for large project
match tokio::task::spawn_blocking({
let gtags_searcher = gtags_searcher.clone();
move || gtags_searcher.force_recreate()
})
.await
{
Ok(_) => {
gtags_regenerated.store(true, Ordering::SeqCst);
tracing::debug!("[dumb_jump] Recreating gtags db successfully");
}
Err(e) => {
tracing::error!(error = ?e, "[dumb_jump] 💔 Failed to recreate gtags db");
}
}
}
}
}
impl DumbJumpProvider {
pub async fn new(ctx: &Context) -> Result<Self> {
let args = ctx.parse_provider_args().await?;
Ok(Self {
args,
cached_results: Default::default(),
current_usages: None,
ctags_regenerated: Arc::new(false.into()),
gtags_regenerated: Arc::new(false.into()),
})
}
async fn initialize_tags(&self, extension: String, cwd: AbsPathBuf) -> Result<()> {
let job_id = utils::calculate_hash(&(&cwd, "dumb_jump"));
if job::reserve(job_id) {
let ctags_future = {
let cwd = cwd.clone();
let mut tags_generator = TagsGenerator::with_dir(cwd.clone());
if let Some(language) = get_language(&extension) {
tags_generator.set_languages(language.into());
}
let ctags_regenerated = self.ctags_regenerated.clone();
// Ctags initialization is usually pretty fast.
async move {
let now = std::time::Instant::now();
let ctags_searcher = CtagsSearcher::new(tags_generator);
match ctags_searcher.generate_tags() {
Ok(()) => ctags_regenerated.store(true, Ordering::SeqCst),
Err(e) => {
tracing::error!(error = ?e, "[dumb_jump] 💔 Error at initializing ctags")
}
}
tracing::debug!(?cwd, "[dumb_jump] ⏱️ Ctags elapsed: {:?}", now.elapsed());
}
};
let gtags_future = {
let cwd: PathBuf = cwd.into();
let gtags_regenerated = self.gtags_regenerated.clone();
let span = tracing::span!(tracing::Level::INFO, "gtags");
async move {
let _ = tokio::task::spawn(init_gtags(cwd, gtags_regenerated)).await;
}
.instrument(span)
};
fn run(job_future: impl Send + Sync + 'static + Future<Output = ()>, job_id: u64) {
tokio::task::spawn({
async move {
let now = std::time::Instant::now();
job_future.await;
tracing::debug!("[dumb_jump] ⏱️ Total elapsed: {:?}", now.elapsed());
job::unreserve(job_id);
}
});
}
match (*CTAGS_EXISTS, *GTAGS_EXISTS) {
(true, true) => run(
async move {
futures::future::join(ctags_future, gtags_future).await;
},
job_id,
),
(false, false) => {}
(true, false) => run(ctags_future, job_id),
(false, true) => run(gtags_future, job_id),
}
}
Ok(())
}
/// Starts a new searching task.
async fn start_search(
&self,
search_worker: SearchWorker,
query: &str,
query_info: QueryInfo,
) -> Result<SearchResults> {
if query.is_empty() {
return Ok(Default::default());
}
let search_engine = match (
self.ctags_regenerated.load(Ordering::Relaxed),
self.gtags_regenerated.load(Ordering::Relaxed),
) {
(true, true) => SearchEngine::All,
(true, false) => SearchEngine::CtagsAndRegex,
_ => SearchEngine::Regex,
};
let usages = search_engine.run(search_worker).await?;
Ok(SearchResults { usages, query_info })
}
fn on_new_search_results(
&mut self,
search_results: SearchResults,
ctx: &Context,
) -> Result<()> {
let matched = search_results.usages.len();
// Only show the top 200 items.
let (lines, indices): (Vec<_>, Vec<_>) = search_results
.usages
.iter()
.take(200)
.map(|usage| (usage.line.as_str(), usage.indices.as_slice()))
.unzip();
let response = json!({ "lines": lines, "indices": indices, "matched": matched });
ctx.vim
.exec("clap#state#process_response_on_typed", response)?;
self.cached_results = search_results;
self.current_usages.take();
Ok(())
}
}
#[async_trait::async_trait]
impl ClapProvider for DumbJumpProvider {
async fn on_initialize(&mut self, ctx: &mut Context) -> Result<()> {
let cwd = ctx.vim.working_dir().await?;
let source_file_extension = ctx.start_buffer_extension()?.to_string();
tokio::task::spawn({
let cwd = cwd.clone();
let extension = source_file_extension.clone();
let dumb_jump = self.clone();
async move {
if let Err(err) = dumb_jump.initialize_tags(extension, cwd).await {
tracing::error!(error = ?err, "Failed to initialize dumb_jump provider");
}
}
});
if let Some(query) = &self.args.query {
let query_info = parse_query_info(query);
let search_worker = SearchWorker {
cwd,
query_info: query_info.clone(),
source_file_extension,
};
let search_results = self.start_search(search_worker, query, query_info).await?;
self.on_new_search_results(search_results, ctx)?;
}
Ok(())
}
async fn on_move(&mut self, ctx: &mut Context) -> Result<()> {
let cur | rent_lines = self
.current_usages
.as_ref()
.unwrap_or(&self.cached_results.usages);
if current_lines.is_empty() {
return Ok(());
}
let input = ctx.vim.input_get().await?;
let lnum = ctx.vim.display_getcurlnum().await?;
// lnum is 1-indexed
let curline = current_lines
.get_line(lnum - 1)
.ok_or_else(|| anyhow::anyhow!("Can not find curline on Rust end for lnum: {lnum}"))?;
let preview_height = ctx.preview_height().await?;
let (preview_target, preview) =
CachedPreviewImpl::new(curline.to_string(), preview_height, ctx)? | identifier_body |
|
mod.rs | the same.
/// - the new query is a subset of last query.
fn is_superset(&self, other: &Self) -> bool {
self.keyword == other.keyword
&& self.query_type == other.query_type
&& self.usage_matcher.is_superset(&other.usage_matcher)
}
}
/// Parses the raw user input and returns the final keyword as well as the constraint terms.
/// Currently, only one keyword is supported.
///
/// `hel 'fn` => `keyword ++ exact_term/inverse_term`.
///
/// # Argument
///
/// - `query`: Initial query typed in the input window.
fn parse_query_info(query: &str) -> QueryInfo {
let Query {
word_terms: _, // TODO: add word_terms to UsageMatcher
exact_terms,
fuzzy_terms,
inverse_terms,
} = Query::from(query);
// If there is no fuzzy term, use the full query as the keyword, | // otherwise restore the fuzzy query as the keyword we are going to search.
let (keyword, query_type, usage_matcher) = if fuzzy_terms.is_empty() {
if exact_terms.is_empty() {
(query.into(), QueryType::StartWith, UsageMatcher::default())
} else {
(
exact_terms[0].text.clone(),
QueryType::Exact,
UsageMatcher::new(exact_terms, inverse_terms),
)
}
} else {
(
fuzzy_terms.iter().map(|term| &term.text).join(" "),
QueryType::StartWith,
UsageMatcher::new(exact_terms, inverse_terms),
)
};
// TODO: Search syntax:
// - 'foo
// - foo*
// - foo
//
// if let Some(stripped) = query.strip_suffix('*') {
// (stripped, QueryType::Contain)
// } else if let Some(stripped) = query.strip_prefix('\'') {
// (stripped, QueryType::Exact)
// } else {
// (query, QueryType::StartWith)
// };
QueryInfo {
keyword,
query_type,
usage_matcher,
}
}
#[derive(Debug, Clone, Default)]
struct SearchResults {
/// Last searching results.
///
/// When passing the line content from Vim to Rust, the performance
/// of Vim can become very bad because some lines are extremely long,
/// we cache the last results on Rust to allow passing the line number
/// from Vim later instead.
usages: Usages,
/// Last parsed query info.
query_info: QueryInfo,
}
#[derive(Debug, Clone)]
pub struct DumbJumpProvider {
args: BaseArgs,
/// Results from last searching.
/// This might be a superset of searching results for the last query.
cached_results: SearchResults,
/// Current results from refiltering on `cached_results`.
current_usages: Option<Usages>,
/// Whether the tags file has been (re)-created.
ctags_regenerated: Arc<AtomicBool>,
/// Whether the GTAGS file has been (re)-created.
gtags_regenerated: Arc<AtomicBool>,
}
async fn init_gtags(cwd: PathBuf, gtags_regenerated: Arc<AtomicBool>) {
let gtags_searcher = GtagsSearcher::new(cwd);
match gtags_searcher.create_or_update_tags() {
Ok(()) => gtags_regenerated.store(true, Ordering::SeqCst),
Err(e) => {
tracing::error!(error = ?e, "[dumb_jump] 💔 Error at initializing GTAGS, attempting to recreate...");
// TODO: creating gtags may take 20s+ for large project
match tokio::task::spawn_blocking({
let gtags_searcher = gtags_searcher.clone();
move || gtags_searcher.force_recreate()
})
.await
{
Ok(_) => {
gtags_regenerated.store(true, Ordering::SeqCst);
tracing::debug!("[dumb_jump] Recreating gtags db successfully");
}
Err(e) => {
tracing::error!(error = ?e, "[dumb_jump] 💔 Failed to recreate gtags db");
}
}
}
}
}
impl DumbJumpProvider {
pub async fn new(ctx: &Context) -> Result<Self> {
let args = ctx.parse_provider_args().await?;
Ok(Self {
args,
cached_results: Default::default(),
current_usages: None,
ctags_regenerated: Arc::new(false.into()),
gtags_regenerated: Arc::new(false.into()),
})
}
async fn initialize_tags(&self, extension: String, cwd: AbsPathBuf) -> Result<()> {
let job_id = utils::calculate_hash(&(&cwd, "dumb_jump"));
if job::reserve(job_id) {
let ctags_future = {
let cwd = cwd.clone();
let mut tags_generator = TagsGenerator::with_dir(cwd.clone());
if let Some(language) = get_language(&extension) {
tags_generator.set_languages(language.into());
}
let ctags_regenerated = self.ctags_regenerated.clone();
// Ctags initialization is usually pretty fast.
async move {
let now = std::time::Instant::now();
let ctags_searcher = CtagsSearcher::new(tags_generator);
match ctags_searcher.generate_tags() {
Ok(()) => ctags_regenerated.store(true, Ordering::SeqCst),
Err(e) => {
tracing::error!(error = ?e, "[dumb_jump] 💔 Error at initializing ctags")
}
}
tracing::debug!(?cwd, "[dumb_jump] ⏱️ Ctags elapsed: {:?}", now.elapsed());
}
};
let gtags_future = {
let cwd: PathBuf = cwd.into();
let gtags_regenerated = self.gtags_regenerated.clone();
let span = tracing::span!(tracing::Level::INFO, "gtags");
async move {
let _ = tokio::task::spawn(init_gtags(cwd, gtags_regenerated)).await;
}
.instrument(span)
};
fn run(job_future: impl Send + Sync + 'static + Future<Output = ()>, job_id: u64) {
tokio::task::spawn({
async move {
let now = std::time::Instant::now();
job_future.await;
tracing::debug!("[dumb_jump] ⏱️ Total elapsed: {:?}", now.elapsed());
job::unreserve(job_id);
}
});
}
match (*CTAGS_EXISTS, *GTAGS_EXISTS) {
(true, true) => run(
async move {
futures::future::join(ctags_future, gtags_future).await;
},
job_id,
),
(false, false) => {}
(true, false) => run(ctags_future, job_id),
(false, true) => run(gtags_future, job_id),
}
}
Ok(())
}
/// Starts a new searching task.
async fn start_search(
&self,
search_worker: SearchWorker,
query: &str,
query_info: QueryInfo,
) -> Result<SearchResults> {
if query.is_empty() {
return Ok(Default::default());
}
let search_engine = match (
self.ctags_regenerated.load(Ordering::Relaxed),
self.gtags_regenerated.load(Ordering::Relaxed),
) {
(true, true) => SearchEngine::All,
(true, false) => SearchEngine::CtagsAndRegex,
_ => SearchEngine::Regex,
};
let usages = search_engine.run(search_worker).await?;
Ok(SearchResults { usages, query_info })
}
fn on_new_search_results(
&mut self,
search_results: SearchResults,
ctx: &Context,
) -> Result<()> {
let matched = search_results.usages.len();
// Only show the top 200 items.
let (lines, indices): (Vec<_>, Vec<_>) = search_results
.usages
.iter()
.take(200)
.map(|usage| (usage.line.as_str(), usage.indices.as_slice()))
.unzip();
let response = json!({ "lines": lines, "indices": indices, "matched": matched });
ctx.vim
.exec("clap#state#process_response_on_typed", response)?;
self.cached_results = search_results;
self.current_usages.take();
Ok(())
}
}
#[async_trait::async_trait]
impl ClapProvider for DumbJumpProvider {
async fn on_initialize(&mut self, ctx: &mut Context) -> Result<()> {
let cwd = ctx.vim.working_dir().await?;
let source_file_extension = ctx.start_buffer_extension()?.to_string();
tokio::task::spawn({
let cwd = cwd.clone();
let extension = source_file_extension.clone();
let dumb_jump = self.clone();
async move {
if let Err(err) = dumb_jump.initialize_tags(extension, cwd).await {
tracing::error!(error = ?err, "Failed to initialize dumb_jump provider");
}
}
});
if let Some(query) = &self.args.query {
let query_info = parse_query_info(query);
let search_worker = SearchWorker {
cwd,
query_info: query_info.clone(),
source_file | random_line_split |
|
mod.rs | same.
/// - the new query is a subset of last query.
fn is_superset(&self, other: &Self) -> bool {
self.keyword == other.keyword
&& self.query_type == other.query_type
&& self.usage_matcher.is_superset(&other.usage_matcher)
}
}
/// Parses the raw user input and returns the final keyword as well as the constraint terms.
/// Currently, only one keyword is supported.
///
/// `hel 'fn` => `keyword ++ exact_term/inverse_term`.
///
/// # Argument
///
/// - `query`: Initial query typed in the input window.
fn parse_query_info(query: &str) -> QueryInfo {
let Query {
word_terms: _, // TODO: add word_terms to UsageMatcher
exact_terms,
fuzzy_terms,
inverse_terms,
} = Query::from(query);
// If there is no fuzzy term, use the full query as the keyword,
// otherwise restore the fuzzy query as the keyword we are going to search.
let (keyword, query_type, usage_matcher) = if fuzzy_terms.is_empty() {
if exact_terms.is_empty() | else {
(
exact_terms[0].text.clone(),
QueryType::Exact,
UsageMatcher::new(exact_terms, inverse_terms),
)
}
} else {
(
fuzzy_terms.iter().map(|term| &term.text).join(" "),
QueryType::StartWith,
UsageMatcher::new(exact_terms, inverse_terms),
)
};
// TODO: Search syntax:
// - 'foo
// - foo*
// - foo
//
// if let Some(stripped) = query.strip_suffix('*') {
// (stripped, QueryType::Contain)
// } else if let Some(stripped) = query.strip_prefix('\'') {
// (stripped, QueryType::Exact)
// } else {
// (query, QueryType::StartWith)
// };
QueryInfo {
keyword,
query_type,
usage_matcher,
}
}
#[derive(Debug, Clone, Default)]
struct SearchResults {
/// Last searching results.
///
/// When passing the line content from Vim to Rust, the performance
/// of Vim can become very bad because some lines are extremely long,
/// we cache the last results on Rust to allow passing the line number
/// from Vim later instead.
usages: Usages,
/// Last parsed query info.
query_info: QueryInfo,
}
#[derive(Debug, Clone)]
pub struct DumbJumpProvider {
args: BaseArgs,
/// Results from last searching.
/// This might be a superset of searching results for the last query.
cached_results: SearchResults,
/// Current results from refiltering on `cached_results`.
current_usages: Option<Usages>,
/// Whether the tags file has been (re)-created.
ctags_regenerated: Arc<AtomicBool>,
/// Whether the GTAGS file has been (re)-created.
gtags_regenerated: Arc<AtomicBool>,
}
async fn init_gtags(cwd: PathBuf, gtags_regenerated: Arc<AtomicBool>) {
let gtags_searcher = GtagsSearcher::new(cwd);
match gtags_searcher.create_or_update_tags() {
Ok(()) => gtags_regenerated.store(true, Ordering::SeqCst),
Err(e) => {
tracing::error!(error = ?e, "[dumb_jump] 💔 Error at initializing GTAGS, attempting to recreate...");
// TODO: creating gtags may take 20s+ for large project
match tokio::task::spawn_blocking({
let gtags_searcher = gtags_searcher.clone();
move || gtags_searcher.force_recreate()
})
.await
{
Ok(_) => {
gtags_regenerated.store(true, Ordering::SeqCst);
tracing::debug!("[dumb_jump] Recreating gtags db successfully");
}
Err(e) => {
tracing::error!(error = ?e, "[dumb_jump] 💔 Failed to recreate gtags db");
}
}
}
}
}
impl DumbJumpProvider {
pub async fn new(ctx: &Context) -> Result<Self> {
let args = ctx.parse_provider_args().await?;
Ok(Self {
args,
cached_results: Default::default(),
current_usages: None,
ctags_regenerated: Arc::new(false.into()),
gtags_regenerated: Arc::new(false.into()),
})
}
async fn initialize_tags(&self, extension: String, cwd: AbsPathBuf) -> Result<()> {
let job_id = utils::calculate_hash(&(&cwd, "dumb_jump"));
if job::reserve(job_id) {
let ctags_future = {
let cwd = cwd.clone();
let mut tags_generator = TagsGenerator::with_dir(cwd.clone());
if let Some(language) = get_language(&extension) {
tags_generator.set_languages(language.into());
}
let ctags_regenerated = self.ctags_regenerated.clone();
// Ctags initialization is usually pretty fast.
async move {
let now = std::time::Instant::now();
let ctags_searcher = CtagsSearcher::new(tags_generator);
match ctags_searcher.generate_tags() {
Ok(()) => ctags_regenerated.store(true, Ordering::SeqCst),
Err(e) => {
tracing::error!(error = ?e, "[dumb_jump] 💔 Error at initializing ctags")
}
}
tracing::debug!(?cwd, "[dumb_jump] ⏱️ Ctags elapsed: {:?}", now.elapsed());
}
};
let gtags_future = {
let cwd: PathBuf = cwd.into();
let gtags_regenerated = self.gtags_regenerated.clone();
let span = tracing::span!(tracing::Level::INFO, "gtags");
async move {
let _ = tokio::task::spawn(init_gtags(cwd, gtags_regenerated)).await;
}
.instrument(span)
};
fn run(job_future: impl Send + Sync + 'static + Future<Output = ()>, job_id: u64) {
tokio::task::spawn({
async move {
let now = std::time::Instant::now();
job_future.await;
tracing::debug!("[dumb_jump] ⏱️ Total elapsed: {:?}", now.elapsed());
job::unreserve(job_id);
}
});
}
match (*CTAGS_EXISTS, *GTAGS_EXISTS) {
(true, true) => run(
async move {
futures::future::join(ctags_future, gtags_future).await;
},
job_id,
),
(false, false) => {}
(true, false) => run(ctags_future, job_id),
(false, true) => run(gtags_future, job_id),
}
}
Ok(())
}
/// Starts a new searching task.
async fn start_search(
&self,
search_worker: SearchWorker,
query: &str,
query_info: QueryInfo,
) -> Result<SearchResults> {
if query.is_empty() {
return Ok(Default::default());
}
let search_engine = match (
self.ctags_regenerated.load(Ordering::Relaxed),
self.gtags_regenerated.load(Ordering::Relaxed),
) {
(true, true) => SearchEngine::All,
(true, false) => SearchEngine::CtagsAndRegex,
_ => SearchEngine::Regex,
};
let usages = search_engine.run(search_worker).await?;
Ok(SearchResults { usages, query_info })
}
fn on_new_search_results(
&mut self,
search_results: SearchResults,
ctx: &Context,
) -> Result<()> {
let matched = search_results.usages.len();
// Only show the top 200 items.
let (lines, indices): (Vec<_>, Vec<_>) = search_results
.usages
.iter()
.take(200)
.map(|usage| (usage.line.as_str(), usage.indices.as_slice()))
.unzip();
let response = json!({ "lines": lines, "indices": indices, "matched": matched });
ctx.vim
.exec("clap#state#process_response_on_typed", response)?;
self.cached_results = search_results;
self.current_usages.take();
Ok(())
}
}
#[async_trait::async_trait]
impl ClapProvider for DumbJumpProvider {
async fn on_initialize(&mut self, ctx: &mut Context) -> Result<()> {
let cwd = ctx.vim.working_dir().await?;
let source_file_extension = ctx.start_buffer_extension()?.to_string();
tokio::task::spawn({
let cwd = cwd.clone();
let extension = source_file_extension.clone();
let dumb_jump = self.clone();
async move {
if let Err(err) = dumb_jump.initialize_tags(extension, cwd).await {
tracing::error!(error = ?err, "Failed to initialize dumb_jump provider");
}
}
});
if let Some(query) = &self.args.query {
let query_info = parse_query_info(query);
let search_worker = SearchWorker {
cwd,
query_info: query_info.clone(),
source_file | {
(query.into(), QueryType::StartWith, UsageMatcher::default())
} | conditional_block |
mod.rs | .usage_matcher.is_superset(&other.usage_matcher)
}
}
/// Parses the raw user input and returns the final keyword as well as the constraint terms.
/// Currently, only one keyword is supported.
///
/// `hel 'fn` => `keyword ++ exact_term/inverse_term`.
///
/// # Argument
///
/// - `query`: Initial query typed in the input window.
fn parse_query_info(query: &str) -> QueryInfo {
let Query {
word_terms: _, // TODO: add word_terms to UsageMatcher
exact_terms,
fuzzy_terms,
inverse_terms,
} = Query::from(query);
// If there is no fuzzy term, use the full query as the keyword,
// otherwise restore the fuzzy query as the keyword we are going to search.
let (keyword, query_type, usage_matcher) = if fuzzy_terms.is_empty() {
if exact_terms.is_empty() {
(query.into(), QueryType::StartWith, UsageMatcher::default())
} else {
(
exact_terms[0].text.clone(),
QueryType::Exact,
UsageMatcher::new(exact_terms, inverse_terms),
)
}
} else {
(
fuzzy_terms.iter().map(|term| &term.text).join(" "),
QueryType::StartWith,
UsageMatcher::new(exact_terms, inverse_terms),
)
};
// TODO: Search syntax:
// - 'foo
// - foo*
// - foo
//
// if let Some(stripped) = query.strip_suffix('*') {
// (stripped, QueryType::Contain)
// } else if let Some(stripped) = query.strip_prefix('\'') {
// (stripped, QueryType::Exact)
// } else {
// (query, QueryType::StartWith)
// };
QueryInfo {
keyword,
query_type,
usage_matcher,
}
}
#[derive(Debug, Clone, Default)]
struct SearchResults {
/// Last searching results.
///
/// When passing the line content from Vim to Rust, the performance
/// of Vim can become very bad because some lines are extremely long,
/// we cache the last results on Rust to allow passing the line number
/// from Vim later instead.
usages: Usages,
/// Last parsed query info.
query_info: QueryInfo,
}
#[derive(Debug, Clone)]
pub struct DumbJumpProvider {
args: BaseArgs,
/// Results from last searching.
/// This might be a superset of searching results for the last query.
cached_results: SearchResults,
/// Current results from refiltering on `cached_results`.
current_usages: Option<Usages>,
/// Whether the tags file has been (re)-created.
ctags_regenerated: Arc<AtomicBool>,
/// Whether the GTAGS file has been (re)-created.
gtags_regenerated: Arc<AtomicBool>,
}
async fn init_gtags(cwd: PathBuf, gtags_regenerated: Arc<AtomicBool>) {
let gtags_searcher = GtagsSearcher::new(cwd);
match gtags_searcher.create_or_update_tags() {
Ok(()) => gtags_regenerated.store(true, Ordering::SeqCst),
Err(e) => {
tracing::error!(error = ?e, "[dumb_jump] 💔 Error at initializing GTAGS, attempting to recreate...");
// TODO: creating gtags may take 20s+ for large project
match tokio::task::spawn_blocking({
let gtags_searcher = gtags_searcher.clone();
move || gtags_searcher.force_recreate()
})
.await
{
Ok(_) => {
gtags_regenerated.store(true, Ordering::SeqCst);
tracing::debug!("[dumb_jump] Recreating gtags db successfully");
}
Err(e) => {
tracing::error!(error = ?e, "[dumb_jump] 💔 Failed to recreate gtags db");
}
}
}
}
}
impl DumbJumpProvider {
pub async fn new(ctx: &Context) -> Result<Self> {
let args = ctx.parse_provider_args().await?;
Ok(Self {
args,
cached_results: Default::default(),
current_usages: None,
ctags_regenerated: Arc::new(false.into()),
gtags_regenerated: Arc::new(false.into()),
})
}
async fn initialize_tags(&self, extension: String, cwd: AbsPathBuf) -> Result<()> {
let job_id = utils::calculate_hash(&(&cwd, "dumb_jump"));
if job::reserve(job_id) {
let ctags_future = {
let cwd = cwd.clone();
let mut tags_generator = TagsGenerator::with_dir(cwd.clone());
if let Some(language) = get_language(&extension) {
tags_generator.set_languages(language.into());
}
let ctags_regenerated = self.ctags_regenerated.clone();
// Ctags initialization is usually pretty fast.
async move {
let now = std::time::Instant::now();
let ctags_searcher = CtagsSearcher::new(tags_generator);
match ctags_searcher.generate_tags() {
Ok(()) => ctags_regenerated.store(true, Ordering::SeqCst),
Err(e) => {
tracing::error!(error = ?e, "[dumb_jump] 💔 Error at initializing ctags")
}
}
tracing::debug!(?cwd, "[dumb_jump] ⏱️ Ctags elapsed: {:?}", now.elapsed());
}
};
let gtags_future = {
let cwd: PathBuf = cwd.into();
let gtags_regenerated = self.gtags_regenerated.clone();
let span = tracing::span!(tracing::Level::INFO, "gtags");
async move {
let _ = tokio::task::spawn(init_gtags(cwd, gtags_regenerated)).await;
}
.instrument(span)
};
fn run(job_future: impl Send + Sync + 'static + Future<Output = ()>, job_id: u64) {
tokio::task::spawn({
async move {
let now = std::time::Instant::now();
job_future.await;
tracing::debug!("[dumb_jump] ⏱️ Total elapsed: {:?}", now.elapsed());
job::unreserve(job_id);
}
});
}
match (*CTAGS_EXISTS, *GTAGS_EXISTS) {
(true, true) => run(
async move {
futures::future::join(ctags_future, gtags_future).await;
},
job_id,
),
(false, false) => {}
(true, false) => run(ctags_future, job_id),
(false, true) => run(gtags_future, job_id),
}
}
Ok(())
}
/// Starts a new searching task.
async fn start_search(
&self,
search_worker: SearchWorker,
query: &str,
query_info: QueryInfo,
) -> Result<SearchResults> {
if query.is_empty() {
return Ok(Default::default());
}
let search_engine = match (
self.ctags_regenerated.load(Ordering::Relaxed),
self.gtags_regenerated.load(Ordering::Relaxed),
) {
(true, true) => SearchEngine::All,
(true, false) => SearchEngine::CtagsAndRegex,
_ => SearchEngine::Regex,
};
let usages = search_engine.run(search_worker).await?;
Ok(SearchResults { usages, query_info })
}
fn on_new_search_results(
&mut self,
search_results: SearchResults,
ctx: &Context,
) -> Result<()> {
let matched = search_results.usages.len();
// Only show the top 200 items.
let (lines, indices): (Vec<_>, Vec<_>) = search_results
.usages
.iter()
.take(200)
.map(|usage| (usage.line.as_str(), usage.indices.as_slice()))
.unzip();
let response = json!({ "lines": lines, "indices": indices, "matched": matched });
ctx.vim
.exec("clap#state#process_response_on_typed", response)?;
self.cached_results = search_results;
self.current_usages.take();
Ok(())
}
}
#[async_trait::async_trait]
impl ClapProvider for DumbJumpProvider {
async fn on_initialize(&mut self, ctx: &mut Context) -> Result<()> {
let cwd = ctx.vim.working_dir().await?;
let source_file_extension = ctx.start_buffer_extension()?.to_string();
tokio::task::spawn({
let cwd = cwd.clone();
let extension = source_file_extension.clone();
let dumb_jump = self.clone();
async move {
if let Err(err) = dumb_jump.initialize_tags(extension, cwd).await {
tracing::error!(error = ?err, "Failed to initialize dumb_jump provider");
}
}
});
if let Some(query) = &self.args.query {
let query_info = parse_query_info(query);
let search_worker = SearchWorker {
cwd,
query_info: query_info.clone(),
source_file_extension,
};
let search_results = self.start_search(search_worker, query, query_info).await?;
self.on_new_search_results(search_results, ctx)?;
}
Ok(())
}
async fn on_move(&mut self | , ctx: | identifier_name |
|
service.go | cheduler *Scheduler
}
func NewService(lc fx.Lifecycle, config *config.Config, db *dbstore.DB) *Service {
dir := config.TempDir
if dir == "" {
var err error
dir, err = ioutil.TempDir("", "dashboard-logs")
if err != nil {
log.Fatal("Failed to create directory for storing logs", zap.Error(err))
}
}
err := autoMigrate(db)
if err != nil {
log.Fatal("Failed to initialize database", zap.Error(err))
}
cleanupAllTasks(db)
service := &Service{
config: config,
logStoreDirectory: dir,
db: db,
scheduler: nil, // will be filled after scheduler is created
}
scheduler := NewScheduler(service)
service.scheduler = scheduler
lc.Append(fx.Hook{
OnStart: func(ctx context.Context) error {
service.lifecycleCtx = ctx
return nil
},
})
return service
}
func RegisterRouter(r *gin.RouterGroup, auth *user.AuthService, s *Service) {
endpoint := r.Group("/logs")
{
endpoint.GET("/download", s.DownloadLogs)
endpoint.Use(auth.MWAuthRequired())
{
endpoint.GET("/download/acquire_token", s.GetDownloadToken)
endpoint.PUT("/taskgroup", s.CreateTaskGroup)
endpoint.GET("/taskgroups", s.GetAllTaskGroups)
endpoint.GET("/taskgroups/:id", s.GetTaskGroup)
endpoint.GET("/taskgroups/:id/preview", s.GetTaskGroupPreview)
endpoint.POST("/taskgroups/:id/retry", s.RetryTask)
endpoint.POST("/taskgroups/:id/cancel", s.CancelTask)
endpoint.DELETE("/taskgroups/:id", s.DeleteTaskGroup)
}
}
}
type CreateTaskGroupRequest struct {
Request SearchLogRequest `json:"request" binding:"required"`
Targets []model.RequestTargetNode `json:"targets" binding:"required"`
}
type TaskGroupResponse struct {
TaskGroup TaskGroupModel `json:"task_group"`
Tasks []*TaskModel `json:"tasks"`
}
// @Summary Create and run a new log search task group
// @Param request body CreateTaskGroupRequest true "Request body"
// @Security JwtAuth
// @Success 200 {object} TaskGroupResponse
// @Failure 400 {object} utils.APIError "Bad request"
// @Failure 401 {object} utils.APIError "Unauthorized failure"
// @Failure 500 {object} utils.APIError
// @Router /logs/taskgroup [put]
func (s *Service) CreateTaskGroup(c *gin.Context) {
var req CreateTaskGroupRequest
if err := c.ShouldBindJSON(&req); err != nil {
utils.MakeInvalidRequestErrorFromError(c, err)
return
}
if len(req.Targets) == 0 {
utils.MakeInvalidRequestErrorWithMessage(c, "Expect at least 1 target")
return
}
stats := model.NewRequestTargetStatisticsFromArray(&req.Targets)
taskGroup := TaskGroupModel{
SearchRequest: &req.Request,
State: TaskGroupStateRunning,
TargetStats: stats,
}
if err := s.db.Create(&taskGroup).Error; err != nil {
_ = c.Error(err)
return
}
tasks := make([]*TaskModel, 0, len(req.Targets))
for _, t := range req.Targets {
target := t
task := &TaskModel{
TaskGroupID: taskGroup.ID,
Target: &target,
State: TaskStateRunning,
}
// Ignore task creation errors
s.db.Create(task)
tasks = append(tasks, task)
}
if !s.scheduler.AsyncStart(&taskGroup, tasks) {
log.Error("Failed to start task group", zap.Uint("task_group_id", taskGroup.ID))
}
resp := TaskGroupResponse{
TaskGroup: taskGroup,
Tasks: tasks,
}
c.JSON(http.StatusOK, resp)
}
// @Summary List all log search task groups
// @Security JwtAuth
// @Success 200 {array} TaskGroupModel
// @Failure 401 {object} utils.APIError "Unauthorized failure"
// @Failure 500 {object} utils.APIError
// @Router /logs/taskgroups [get]
func (s *Service) GetAllTaskGroups(c *gin.Context) {
var taskGroups []*TaskGroupModel
err := s.db.Find(&taskGroups).Error
if err != nil {
_ = c.Error(err)
return
}
c.JSON(http.StatusOK, taskGroups)
}
// @Summary List tasks in a log search task group
// @Param id path string true "Task Group ID"
// @Security JwtAuth
// @Success 200 {object} TaskGroupResponse
// @Failure 401 {object} utils.APIError "Unauthorized failure"
// @Failure 500 {object} utils.APIError
// @Router /logs/taskgroups/{id} [get]
func (s *Service) GetTaskGroup(c *gin.Context) |
// @Summary Preview a log search task group
// @Param id path string true "task group id"
// @Security JwtAuth
// @Success 200 {array} PreviewModel
// @Failure 401 {object} utils.APIError "Unauthorized failure"
// @Failure 500 {object} utils.APIError
// @Router /logs/taskgroups/{id}/preview [get]
func (s *Service) GetTaskGroupPreview(c *gin.Context) {
taskGroupID := c.Param("id")
var lines []PreviewModel
err := s.db.
Where("task_group_id = ?", taskGroupID).
Order("time").
Limit(TaskMaxPreviewLines).
Find(&lines).Error
if err != nil {
_ = c.Error(err)
return
}
c.JSON(http.StatusOK, lines)
}
// @Summary Retry failed tasks in a log search task group
// @Param id path string true "task group id"
// @Security JwtAuth
// @Success 200 {object} utils.APIEmptyResponse
// @Failure 400 {object} utils.APIError
// @Failure 401 {object} utils.APIError "Unauthorized failure"
// @Failure 500 {object} utils.APIError
// @Router /logs/taskgroups/{id}/retry [post]
func (s *Service) RetryTask(c *gin.Context) {
taskGroupID, err := strconv.Atoi(c.Param("id"))
if err != nil {
utils.MakeInvalidRequestErrorFromError(c, err)
return
}
// Currently we can only retry finished task group.
taskGroup := TaskGroupModel{}
if err := s.db.Where("id = ? AND state = ?", taskGroupID, TaskGroupStateFinished).First(&taskGroup).Error; err != nil {
_ = c.Error(err)
return
}
tasks := make([]*TaskModel, 0)
if err := s.db.Where("task_group_id = ? AND state = ?", taskGroupID, TaskStateError).Find(&tasks).Error; err != nil {
_ = c.Error(err)
return
}
if len(tasks) == 0 {
// No tasks to retry
c.JSON(http.StatusOK, utils.APIEmptyResponse{})
return
}
// Reset task status
taskGroup.State = TaskGroupStateRunning
s.db.Save(&taskGroup)
for _, task := range tasks {
task.Error = nil
task.State = TaskStateRunning
s.db.Save(task)
}
if !s.scheduler.AsyncStart(&taskGroup, tasks) {
log.Error("Failed to retry task group", zap.Uint("task_group_id", taskGroup.ID))
}
c.JSON(http.StatusOK, utils.APIEmptyResponse{})
}
// @Summary Cancel running tasks in a log search task group
// @Param id path string true "task group id"
// @Security JwtAuth
// @Success 200 {object} utils.APIEmptyResponse
// @Failure 401 {object} utils.APIError "Unauthorized failure"
// @Failure 400 {object} utils.APIError
// @Router /logs/taskgroups/{id}/cancel [post]
func (s *Service) CancelTask(c *gin.Context) {
taskGroupID, err := strconv.Atoi(c.Param("id"))
if err != nil {
utils.MakeInvalidRequestErrorFromError(c, err)
return
}
taskGroup := TaskGroupModel{}
err = s.db.First(&taskGroup, taskGroupID).Error
if err != nil {
_ = c.Error(err)
return
}
if taskGroup.State != TaskGroupStateRunning {
utils.MakeInvalidRequestErrorWithMessage(c, "Task is not running")
return | {
taskGroupID := c.Param("id")
var taskGroup TaskGroupModel
var tasks []*TaskModel
err := s.db.First(&taskGroup, "id = ?", taskGroupID).Error
if err != nil {
_ = c.Error(err)
return
}
err = s.db.Where("task_group_id = ?", taskGroupID).Find(&tasks).Error
if err != nil {
_ = c.Error(err)
return
}
resp := TaskGroupResponse{
TaskGroup: taskGroup,
Tasks: tasks,
}
c.JSON(http.StatusOK, resp)
} | identifier_body |
service.go | cheduler *Scheduler
}
func NewService(lc fx.Lifecycle, config *config.Config, db *dbstore.DB) *Service {
dir := config.TempDir
if dir == "" {
var err error
dir, err = ioutil.TempDir("", "dashboard-logs")
if err != nil {
log.Fatal("Failed to create directory for storing logs", zap.Error(err))
}
}
err := autoMigrate(db)
if err != nil {
log.Fatal("Failed to initialize database", zap.Error(err))
}
cleanupAllTasks(db)
service := &Service{
config: config,
logStoreDirectory: dir,
db: db,
scheduler: nil, // will be filled after scheduler is created
}
scheduler := NewScheduler(service)
service.scheduler = scheduler
lc.Append(fx.Hook{
OnStart: func(ctx context.Context) error {
service.lifecycleCtx = ctx
return nil
},
})
return service
}
func RegisterRouter(r *gin.RouterGroup, auth *user.AuthService, s *Service) {
endpoint := r.Group("/logs")
{
endpoint.GET("/download", s.DownloadLogs)
endpoint.Use(auth.MWAuthRequired())
{
endpoint.GET("/download/acquire_token", s.GetDownloadToken)
endpoint.PUT("/taskgroup", s.CreateTaskGroup)
endpoint.GET("/taskgroups", s.GetAllTaskGroups)
endpoint.GET("/taskgroups/:id", s.GetTaskGroup)
endpoint.GET("/taskgroups/:id/preview", s.GetTaskGroupPreview)
endpoint.POST("/taskgroups/:id/retry", s.RetryTask)
endpoint.POST("/taskgroups/:id/cancel", s.CancelTask)
endpoint.DELETE("/taskgroups/:id", s.DeleteTaskGroup)
}
}
}
type CreateTaskGroupRequest struct {
Request SearchLogRequest `json:"request" binding:"required"`
Targets []model.RequestTargetNode `json:"targets" binding:"required"`
}
type TaskGroupResponse struct {
TaskGroup TaskGroupModel `json:"task_group"`
Tasks []*TaskModel `json:"tasks"`
}
// @Summary Create and run a new log search task group
// @Param request body CreateTaskGroupRequest true "Request body"
// @Security JwtAuth
// @Success 200 {object} TaskGroupResponse
// @Failure 400 {object} utils.APIError "Bad request"
// @Failure 401 {object} utils.APIError "Unauthorized failure"
// @Failure 500 {object} utils.APIError
// @Router /logs/taskgroup [put]
func (s *Service) CreateTaskGroup(c *gin.Context) {
var req CreateTaskGroupRequest
if err := c.ShouldBindJSON(&req); err != nil |
if len(req.Targets) == 0 {
utils.MakeInvalidRequestErrorWithMessage(c, "Expect at least 1 target")
return
}
stats := model.NewRequestTargetStatisticsFromArray(&req.Targets)
taskGroup := TaskGroupModel{
SearchRequest: &req.Request,
State: TaskGroupStateRunning,
TargetStats: stats,
}
if err := s.db.Create(&taskGroup).Error; err != nil {
_ = c.Error(err)
return
}
tasks := make([]*TaskModel, 0, len(req.Targets))
for _, t := range req.Targets {
target := t
task := &TaskModel{
TaskGroupID: taskGroup.ID,
Target: &target,
State: TaskStateRunning,
}
// Ignore task creation errors
s.db.Create(task)
tasks = append(tasks, task)
}
if !s.scheduler.AsyncStart(&taskGroup, tasks) {
log.Error("Failed to start task group", zap.Uint("task_group_id", taskGroup.ID))
}
resp := TaskGroupResponse{
TaskGroup: taskGroup,
Tasks: tasks,
}
c.JSON(http.StatusOK, resp)
}
// @Summary List all log search task groups
// @Security JwtAuth
// @Success 200 {array} TaskGroupModel
// @Failure 401 {object} utils.APIError "Unauthorized failure"
// @Failure 500 {object} utils.APIError
// @Router /logs/taskgroups [get]
func (s *Service) GetAllTaskGroups(c *gin.Context) {
var taskGroups []*TaskGroupModel
err := s.db.Find(&taskGroups).Error
if err != nil {
_ = c.Error(err)
return
}
c.JSON(http.StatusOK, taskGroups)
}
// @Summary List tasks in a log search task group
// @Param id path string true "Task Group ID"
// @Security JwtAuth
// @Success 200 {object} TaskGroupResponse
// @Failure 401 {object} utils.APIError "Unauthorized failure"
// @Failure 500 {object} utils.APIError
// @Router /logs/taskgroups/{id} [get]
func (s *Service) GetTaskGroup(c *gin.Context) {
taskGroupID := c.Param("id")
var taskGroup TaskGroupModel
var tasks []*TaskModel
err := s.db.First(&taskGroup, "id = ?", taskGroupID).Error
if err != nil {
_ = c.Error(err)
return
}
err = s.db.Where("task_group_id = ?", taskGroupID).Find(&tasks).Error
if err != nil {
_ = c.Error(err)
return
}
resp := TaskGroupResponse{
TaskGroup: taskGroup,
Tasks: tasks,
}
c.JSON(http.StatusOK, resp)
}
// @Summary Preview a log search task group
// @Param id path string true "task group id"
// @Security JwtAuth
// @Success 200 {array} PreviewModel
// @Failure 401 {object} utils.APIError "Unauthorized failure"
// @Failure 500 {object} utils.APIError
// @Router /logs/taskgroups/{id}/preview [get]
func (s *Service) GetTaskGroupPreview(c *gin.Context) {
taskGroupID := c.Param("id")
var lines []PreviewModel
err := s.db.
Where("task_group_id = ?", taskGroupID).
Order("time").
Limit(TaskMaxPreviewLines).
Find(&lines).Error
if err != nil {
_ = c.Error(err)
return
}
c.JSON(http.StatusOK, lines)
}
// @Summary Retry failed tasks in a log search task group
// @Param id path string true "task group id"
// @Security JwtAuth
// @Success 200 {object} utils.APIEmptyResponse
// @Failure 400 {object} utils.APIError
// @Failure 401 {object} utils.APIError "Unauthorized failure"
// @Failure 500 {object} utils.APIError
// @Router /logs/taskgroups/{id}/retry [post]
func (s *Service) RetryTask(c *gin.Context) {
taskGroupID, err := strconv.Atoi(c.Param("id"))
if err != nil {
utils.MakeInvalidRequestErrorFromError(c, err)
return
}
// Currently we can only retry finished task group.
taskGroup := TaskGroupModel{}
if err := s.db.Where("id = ? AND state = ?", taskGroupID, TaskGroupStateFinished).First(&taskGroup).Error; err != nil {
_ = c.Error(err)
return
}
tasks := make([]*TaskModel, 0)
if err := s.db.Where("task_group_id = ? AND state = ?", taskGroupID, TaskStateError).Find(&tasks).Error; err != nil {
_ = c.Error(err)
return
}
if len(tasks) == 0 {
// No tasks to retry
c.JSON(http.StatusOK, utils.APIEmptyResponse{})
return
}
// Reset task status
taskGroup.State = TaskGroupStateRunning
s.db.Save(&taskGroup)
for _, task := range tasks {
task.Error = nil
task.State = TaskStateRunning
s.db.Save(task)
}
if !s.scheduler.AsyncStart(&taskGroup, tasks) {
log.Error("Failed to retry task group", zap.Uint("task_group_id", taskGroup.ID))
}
c.JSON(http.StatusOK, utils.APIEmptyResponse{})
}
// @Summary Cancel running tasks in a log search task group
// @Param id path string true "task group id"
// @Security JwtAuth
// @Success 200 {object} utils.APIEmptyResponse
// @Failure 401 {object} utils.APIError "Unauthorized failure"
// @Failure 400 {object} utils.APIError
// @Router /logs/taskgroups/{id}/cancel [post]
func (s *Service) CancelTask(c *gin.Context) {
taskGroupID, err := strconv.Atoi(c.Param("id"))
if err != nil {
utils.MakeInvalidRequestErrorFromError(c, err)
return
}
taskGroup := TaskGroupModel{}
err = s.db.First(&taskGroup, taskGroupID).Error
if err != nil {
_ = c.Error(err)
return
}
if taskGroup.State != TaskGroupStateRunning {
utils.MakeInvalidRequestErrorWithMessage(c, "Task is not running")
return | {
utils.MakeInvalidRequestErrorFromError(c, err)
return
} | conditional_block |
service.go | cheduler *Scheduler
}
func NewService(lc fx.Lifecycle, config *config.Config, db *dbstore.DB) *Service {
dir := config.TempDir
if dir == "" {
var err error
dir, err = ioutil.TempDir("", "dashboard-logs")
if err != nil {
log.Fatal("Failed to create directory for storing logs", zap.Error(err))
}
}
err := autoMigrate(db)
if err != nil {
log.Fatal("Failed to initialize database", zap.Error(err))
}
cleanupAllTasks(db)
service := &Service{
config: config,
logStoreDirectory: dir,
db: db,
scheduler: nil, // will be filled after scheduler is created
}
scheduler := NewScheduler(service)
service.scheduler = scheduler
lc.Append(fx.Hook{
OnStart: func(ctx context.Context) error {
service.lifecycleCtx = ctx
return nil
},
})
return service
}
func RegisterRouter(r *gin.RouterGroup, auth *user.AuthService, s *Service) {
endpoint := r.Group("/logs")
{
endpoint.GET("/download", s.DownloadLogs)
endpoint.Use(auth.MWAuthRequired())
{
endpoint.GET("/download/acquire_token", s.GetDownloadToken)
endpoint.PUT("/taskgroup", s.CreateTaskGroup)
endpoint.GET("/taskgroups", s.GetAllTaskGroups)
endpoint.GET("/taskgroups/:id", s.GetTaskGroup)
endpoint.GET("/taskgroups/:id/preview", s.GetTaskGroupPreview)
endpoint.POST("/taskgroups/:id/retry", s.RetryTask)
endpoint.POST("/taskgroups/:id/cancel", s.CancelTask)
endpoint.DELETE("/taskgroups/:id", s.DeleteTaskGroup)
}
}
}
type CreateTaskGroupRequest struct {
Request SearchLogRequest `json:"request" binding:"required"`
Targets []model.RequestTargetNode `json:"targets" binding:"required"`
}
type TaskGroupResponse struct {
TaskGroup TaskGroupModel `json:"task_group"`
Tasks []*TaskModel `json:"tasks"`
}
// @Summary Create and run a new log search task group
// @Param request body CreateTaskGroupRequest true "Request body"
// @Security JwtAuth
// @Success 200 {object} TaskGroupResponse
// @Failure 400 {object} utils.APIError "Bad request"
// @Failure 401 {object} utils.APIError "Unauthorized failure"
// @Failure 500 {object} utils.APIError
// @Router /logs/taskgroup [put]
func (s *Service) CreateTaskGroup(c *gin.Context) {
var req CreateTaskGroupRequest
if err := c.ShouldBindJSON(&req); err != nil {
utils.MakeInvalidRequestErrorFromError(c, err)
return
}
if len(req.Targets) == 0 {
utils.MakeInvalidRequestErrorWithMessage(c, "Expect at least 1 target")
return
}
stats := model.NewRequestTargetStatisticsFromArray(&req.Targets)
taskGroup := TaskGroupModel{
SearchRequest: &req.Request,
State: TaskGroupStateRunning,
TargetStats: stats,
}
if err := s.db.Create(&taskGroup).Error; err != nil {
_ = c.Error(err)
return
}
tasks := make([]*TaskModel, 0, len(req.Targets))
for _, t := range req.Targets {
target := t
task := &TaskModel{
TaskGroupID: taskGroup.ID,
Target: &target,
State: TaskStateRunning,
}
// Ignore task creation errors
s.db.Create(task)
tasks = append(tasks, task)
}
if !s.scheduler.AsyncStart(&taskGroup, tasks) {
log.Error("Failed to start task group", zap.Uint("task_group_id", taskGroup.ID))
}
resp := TaskGroupResponse{
TaskGroup: taskGroup,
Tasks: tasks,
}
c.JSON(http.StatusOK, resp)
}
// @Summary List all log search task groups
// @Security JwtAuth
// @Success 200 {array} TaskGroupModel
// @Failure 401 {object} utils.APIError "Unauthorized failure"
// @Failure 500 {object} utils.APIError
// @Router /logs/taskgroups [get]
func (s *Service) GetAllTaskGroups(c *gin.Context) {
var taskGroups []*TaskGroupModel
err := s.db.Find(&taskGroups).Error
if err != nil {
_ = c.Error(err)
return
}
c.JSON(http.StatusOK, taskGroups)
}
// @Summary List tasks in a log search task group
// @Param id path string true "Task Group ID"
// @Security JwtAuth
// @Success 200 {object} TaskGroupResponse
// @Failure 401 {object} utils.APIError "Unauthorized failure"
// @Failure 500 {object} utils.APIError
// @Router /logs/taskgroups/{id} [get]
func (s *Service) GetTaskGroup(c *gin.Context) {
taskGroupID := c.Param("id")
var taskGroup TaskGroupModel
var tasks []*TaskModel
err := s.db.First(&taskGroup, "id = ?", taskGroupID).Error
if err != nil {
_ = c.Error(err)
return
}
err = s.db.Where("task_group_id = ?", taskGroupID).Find(&tasks).Error
if err != nil {
_ = c.Error(err)
return
}
resp := TaskGroupResponse{
TaskGroup: taskGroup,
Tasks: tasks,
}
c.JSON(http.StatusOK, resp)
}
// @Summary Preview a log search task group
// @Param id path string true "task group id"
// @Security JwtAuth
// @Success 200 {array} PreviewModel
// @Failure 401 {object} utils.APIError "Unauthorized failure"
// @Failure 500 {object} utils.APIError
// @Router /logs/taskgroups/{id}/preview [get]
func (s *Service) | (c *gin.Context) {
taskGroupID := c.Param("id")
var lines []PreviewModel
err := s.db.
Where("task_group_id = ?", taskGroupID).
Order("time").
Limit(TaskMaxPreviewLines).
Find(&lines).Error
if err != nil {
_ = c.Error(err)
return
}
c.JSON(http.StatusOK, lines)
}
// @Summary Retry failed tasks in a log search task group
// @Param id path string true "task group id"
// @Security JwtAuth
// @Success 200 {object} utils.APIEmptyResponse
// @Failure 400 {object} utils.APIError
// @Failure 401 {object} utils.APIError "Unauthorized failure"
// @Failure 500 {object} utils.APIError
// @Router /logs/taskgroups/{id}/retry [post]
func (s *Service) RetryTask(c *gin.Context) {
taskGroupID, err := strconv.Atoi(c.Param("id"))
if err != nil {
utils.MakeInvalidRequestErrorFromError(c, err)
return
}
// Currently we can only retry a finished task group.
taskGroup := TaskGroupModel{}
if err := s.db.Where("id = ? AND state = ?", taskGroupID, TaskGroupStateFinished).First(&taskGroup).Error; err != nil {
_ = c.Error(err)
return
}
tasks := make([]*TaskModel, 0)
if err := s.db.Where("task_group_id = ? AND state = ?", taskGroupID, TaskStateError).Find(&tasks).Error; err != nil {
_ = c.Error(err)
return
}
if len(tasks) == 0 {
// No tasks to retry
c.JSON(http.StatusOK, utils.APIEmptyResponse{})
return
}
// Reset task status
taskGroup.State = TaskGroupStateRunning
s.db.Save(&taskGroup)
for _, task := range tasks {
task.Error = nil
task.State = TaskStateRunning
s.db.Save(task)
}
if !s.scheduler.AsyncStart(&taskGroup, tasks) {
log.Error("Failed to retry task group", zap.Uint("task_group_id", taskGroup.ID))
}
c.JSON(http.StatusOK, utils.APIEmptyResponse{})
}
// @Summary Cancel running tasks in a log search task group
// @Param id path string true "task group id"
// @Security JwtAuth
// @Success 200 {object} utils.APIEmptyResponse
// @Failure 401 {object} utils.APIError "Unauthorized failure"
// @Failure 400 {object} utils.APIError
// @Router /logs/taskgroups/{id}/cancel [post]
func (s *Service) CancelTask(c *gin.Context) {
taskGroupID, err := strconv.Atoi(c.Param("id"))
if err != nil {
utils.MakeInvalidRequestErrorFromError(c, err)
return
}
taskGroup := TaskGroupModel{}
err = s.db.First(&taskGroup, taskGroupID).Error
if err != nil {
_ = c.Error(err)
return
}
if taskGroup.State != TaskGroupStateRunning {
utils.MakeInvalidRequestErrorWithMessage(c, "Task is not running")
return
| GetTaskGroupPreview | identifier_name |
service.go | StoreDirectory: dir,
db: db,
scheduler: nil, // will be filled after scheduler is created
}
scheduler := NewScheduler(service)
service.scheduler = scheduler
lc.Append(fx.Hook{
OnStart: func(ctx context.Context) error {
service.lifecycleCtx = ctx
return nil
},
})
return service
}
func RegisterRouter(r *gin.RouterGroup, auth *user.AuthService, s *Service) {
endpoint := r.Group("/logs")
{
endpoint.GET("/download", s.DownloadLogs)
endpoint.Use(auth.MWAuthRequired())
{
endpoint.GET("/download/acquire_token", s.GetDownloadToken)
endpoint.PUT("/taskgroup", s.CreateTaskGroup)
endpoint.GET("/taskgroups", s.GetAllTaskGroups)
endpoint.GET("/taskgroups/:id", s.GetTaskGroup)
endpoint.GET("/taskgroups/:id/preview", s.GetTaskGroupPreview)
endpoint.POST("/taskgroups/:id/retry", s.RetryTask)
endpoint.POST("/taskgroups/:id/cancel", s.CancelTask)
endpoint.DELETE("/taskgroups/:id", s.DeleteTaskGroup)
}
}
}
type CreateTaskGroupRequest struct {
Request SearchLogRequest `json:"request" binding:"required"`
Targets []model.RequestTargetNode `json:"targets" binding:"required"`
}
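// Illustrative request body for PUT /logs/taskgroup, derived only from the json tags
// above; the inner fields of SearchLogRequest and model.RequestTargetNode are not shown
// in this file, so they are elided here rather than invented:
//
//   {
//     "request": { /* SearchLogRequest fields */ },
//     "targets": [ { /* RequestTargetNode fields */ } ]
//   }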
type TaskGroupResponse struct {
TaskGroup TaskGroupModel `json:"task_group"`
Tasks []*TaskModel `json:"tasks"`
}
// @Summary Create and run a new log search task group
// @Param request body CreateTaskGroupRequest true "Request body"
// @Security JwtAuth
// @Success 200 {object} TaskGroupResponse
// @Failure 400 {object} utils.APIError "Bad request"
// @Failure 401 {object} utils.APIError "Unauthorized failure"
// @Failure 500 {object} utils.APIError
// @Router /logs/taskgroup [put]
func (s *Service) CreateTaskGroup(c *gin.Context) {
var req CreateTaskGroupRequest
if err := c.ShouldBindJSON(&req); err != nil {
utils.MakeInvalidRequestErrorFromError(c, err)
return
}
if len(req.Targets) == 0 {
utils.MakeInvalidRequestErrorWithMessage(c, "Expect at least 1 target")
return
}
stats := model.NewRequestTargetStatisticsFromArray(&req.Targets)
taskGroup := TaskGroupModel{
SearchRequest: &req.Request,
State: TaskGroupStateRunning,
TargetStats: stats,
}
if err := s.db.Create(&taskGroup).Error; err != nil {
_ = c.Error(err)
return
}
tasks := make([]*TaskModel, 0, len(req.Targets))
for _, t := range req.Targets {
target := t
task := &TaskModel{
TaskGroupID: taskGroup.ID,
Target: &target,
State: TaskStateRunning,
}
// Ignore task creation errors
s.db.Create(task)
tasks = append(tasks, task)
}
if !s.scheduler.AsyncStart(&taskGroup, tasks) {
log.Error("Failed to start task group", zap.Uint("task_group_id", taskGroup.ID))
}
resp := TaskGroupResponse{
TaskGroup: taskGroup,
Tasks: tasks,
}
c.JSON(http.StatusOK, resp)
}
// @Summary List all log search task groups
// @Security JwtAuth
// @Success 200 {array} TaskGroupModel
// @Failure 401 {object} utils.APIError "Unauthorized failure"
// @Failure 500 {object} utils.APIError
// @Router /logs/taskgroups [get]
func (s *Service) GetAllTaskGroups(c *gin.Context) {
var taskGroups []*TaskGroupModel
err := s.db.Find(&taskGroups).Error
if err != nil {
_ = c.Error(err)
return
}
c.JSON(http.StatusOK, taskGroups)
}
// @Summary List tasks in a log search task group
// @Param id path string true "Task Group ID"
// @Security JwtAuth
// @Success 200 {object} TaskGroupResponse
// @Failure 401 {object} utils.APIError "Unauthorized failure"
// @Failure 500 {object} utils.APIError
// @Router /logs/taskgroups/{id} [get]
func (s *Service) GetTaskGroup(c *gin.Context) {
taskGroupID := c.Param("id")
var taskGroup TaskGroupModel
var tasks []*TaskModel
err := s.db.First(&taskGroup, "id = ?", taskGroupID).Error
if err != nil {
_ = c.Error(err)
return
}
err = s.db.Where("task_group_id = ?", taskGroupID).Find(&tasks).Error
if err != nil {
_ = c.Error(err)
return
}
resp := TaskGroupResponse{
TaskGroup: taskGroup,
Tasks: tasks,
}
c.JSON(http.StatusOK, resp)
}
// @Summary Preview a log search task group
// @Param id path string true "task group id"
// @Security JwtAuth
// @Success 200 {array} PreviewModel
// @Failure 401 {object} utils.APIError "Unauthorized failure"
// @Failure 500 {object} utils.APIError
// @Router /logs/taskgroups/{id}/preview [get]
func (s *Service) GetTaskGroupPreview(c *gin.Context) {
taskGroupID := c.Param("id")
var lines []PreviewModel
err := s.db.
Where("task_group_id = ?", taskGroupID).
Order("time").
Limit(TaskMaxPreviewLines).
Find(&lines).Error
if err != nil {
_ = c.Error(err)
return
}
c.JSON(http.StatusOK, lines)
}
// @Summary Retry failed tasks in a log search task group
// @Param id path string true "task group id"
// @Security JwtAuth
// @Success 200 {object} utils.APIEmptyResponse
// @Failure 400 {object} utils.APIError
// @Failure 401 {object} utils.APIError "Unauthorized failure"
// @Failure 500 {object} utils.APIError
// @Router /logs/taskgroups/{id}/retry [post]
func (s *Service) RetryTask(c *gin.Context) {
taskGroupID, err := strconv.Atoi(c.Param("id"))
if err != nil {
utils.MakeInvalidRequestErrorFromError(c, err)
return
}
// Currently we can only retry a finished task group.
taskGroup := TaskGroupModel{}
if err := s.db.Where("id = ? AND state = ?", taskGroupID, TaskGroupStateFinished).First(&taskGroup).Error; err != nil {
_ = c.Error(err)
return
}
tasks := make([]*TaskModel, 0)
if err := s.db.Where("task_group_id = ? AND state = ?", taskGroupID, TaskStateError).Find(&tasks).Error; err != nil {
_ = c.Error(err)
return
}
if len(tasks) == 0 {
// No tasks to retry
c.JSON(http.StatusOK, utils.APIEmptyResponse{})
return
}
// Reset task status
taskGroup.State = TaskGroupStateRunning
s.db.Save(&taskGroup)
for _, task := range tasks {
task.Error = nil
task.State = TaskStateRunning
s.db.Save(task)
}
if !s.scheduler.AsyncStart(&taskGroup, tasks) {
log.Error("Failed to retry task group", zap.Uint("task_group_id", taskGroup.ID))
}
c.JSON(http.StatusOK, utils.APIEmptyResponse{})
}
// @Summary Cancel running tasks in a log search task group
// @Param id path string true "task group id"
// @Security JwtAuth
// @Success 200 {object} utils.APIEmptyResponse
// @Failure 401 {object} utils.APIError "Unauthorized failure"
// @Failure 400 {object} utils.APIError
// @Router /logs/taskgroups/{id}/cancel [post]
func (s *Service) CancelTask(c *gin.Context) {
taskGroupID, err := strconv.Atoi(c.Param("id"))
if err != nil {
utils.MakeInvalidRequestErrorFromError(c, err)
return
}
taskGroup := TaskGroupModel{}
err = s.db.First(&taskGroup, taskGroupID).Error
if err != nil {
_ = c.Error(err)
return
}
if taskGroup.State != TaskGroupStateRunning {
utils.MakeInvalidRequestErrorWithMessage(c, "Task is not running")
return
}
s.scheduler.AsyncAbort(uint(taskGroupID))
c.JSON(http.StatusOK, utils.APIEmptyResponse{})
}
// @Summary Delete a log search task group
// @Param id path string true "task group id"
// @Security JwtAuth
// @Success 200 {object} utils.APIEmptyResponse
// @Failure 401 {object} utils.APIError "Unauthorized failure"
// @Failure 500 {object} utils.APIError
// @Router /logs/taskgroups/{id} [delete]
func (s *Service) DeleteTaskGroup(c *gin.Context) { | taskGroupID := c.Param("id") | random_line_split |
|
create.go | 8s.io/kubeadm/kinder/pkg/constants"
"k8s.io/kubeadm/kinder/pkg/cri/host"
"k8s.io/kubeadm/kinder/pkg/cri/nodes"
"k8s.io/kubeadm/kinder/pkg/exec"
)
// CreateOptions holds all the options used at create time
type CreateOptions struct {
controlPlanes int
workers int
image string
externalLoadBalancer bool
externalEtcd bool
retain bool
volumes []string
}
// CreateOption is a configuration option supplied to Create
type CreateOption func(*CreateOptions)
// ControlPlanes sets the number of control plane nodes for create
func ControlPlanes(controlPlanes int) CreateOption {
return func(c *CreateOptions) {
c.controlPlanes = controlPlanes
}
}
// Workers sets the number of worker nodes for create
func Workers(workers int) CreateOption {
return func(c *CreateOptions) {
c.workers = workers
}
}
// Image sets the image for create
func Image(image string) CreateOption {
return func(c *CreateOptions) {
c.image = image
}
}
// ExternalEtcd instructs create to add an external etcd to the cluster
func ExternalEtcd(externalEtcd bool) CreateOption {
return func(c *CreateOptions) {
c.externalEtcd = externalEtcd
}
}
// ExternalLoadBalancer instructs create to add an external loadbalancer to the cluster.
// NB. this happens automatically when there is more than one control plane instance, but with this flag
// it is possible to override the default behaviour
func ExternalLoadBalancer(externalLoadBalancer bool) CreateOption {
return func(c *CreateOptions) {
c.externalLoadBalancer = externalLoadBalancer
}
}
// Retain option instructs create cluster to preserve node in case of errors for debugging purposes
func Retain(retain bool) CreateOption {
return func(c *CreateOptions) {
c.retain = retain
}
}
// Volumes option instructs create cluster to add volumes to the node containers
func Volumes(volumes []string) CreateOption {
return func(c *CreateOptions) {
c.volumes = volumes
}
}
// CreateCluster creates a new kinder cluster
func CreateCluster(clusterName string, options ...CreateOption) error {
flags := &CreateOptions{}
for _, o := range options {
o(flags)
}
// Check if the cluster name already exists
known, err := status.IsKnown(clusterName)
if err != nil {
return err
}
if known {
return errors.Errorf("a cluster with the name %q already exists", clusterName)
}
fmt.Printf("Creating cluster %q ...\n", clusterName)
// attempt to explicitly pull the required node image if it doesn't exist locally
// we don't care if this errors, we'll still try to run which also pulls
ensureNodeImage(flags.image)
handleErr := func(err error) error {
// In case of errors nodes are deleted (except if retain is explicitly set)
if !flags.retain {
if c, err := status.FromDocker(clusterName); err != nil {
log.Error(err)
} else {
for _, n := range c.AllNodes() {
if err := exec.NewHostCmd(
"docker",
"rm",
"-f", // force the container to be deleted now
"-v", // delete volumes
n.Name(),
).Run(); err != nil {
return errors.Wrapf(err, "failed to delete node %s", n.Name())
}
}
}
}
log.Error(err)
return err
}
// Create node containers as defined in the kind config
if err := createNodes(
clusterName,
flags,
); err != nil {
return handleErr(err)
}
fmt.Println()
fmt.Printf("Nodes creation complete. You can now continue creating a Kubernetes cluster using\n")
fmt.Printf("kinder do, the kinder swiss knife 🚀!\n")
return nil
}
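// exampleCreateCluster is an illustrative sketch, not part of the original file: it shows
// how the functional options above compose into a CreateCluster call. The cluster name,
// node image tag, and option values are placeholders chosen for the example.
func exampleCreateCluster() error {
return CreateCluster("kinder-test",
ControlPlanes(3),
Workers(2),
Image("kindest/node:v1.21.1"), // hypothetical image tag
ExternalEtcd(true),
Retain(false),
)
}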
func createNodes(clusterName string, flags *CreateOptions) error {
// compute the desired nodes, and inform the user that we are setting them up
desiredNodes := nodesToCreate(clusterName, flags)
numberOfNodes := len(desiredNodes)
if flags.externalEtcd {
numberOfNodes++
}
fmt.Printf("Preparing nodes %s\n", strings.Repeat("📦", numberOfNodes))
// detect CRI runtime installed into images before actually creating nodes
runtime, err := status.InspectCRIinImage(flags.image)
if err != nil {
log.Errorf("Error detecting CRI for images %s! %v", flags.image, err)
return err
}
log.Infof("Detected %s container runtime for image %s", runtime, flags.image)
createHelper, err := nodes.NewCreateHelper(runtime)
if err != nil {
log.Errorf("Error creating NewCreateHelper for CRI %s! %v", flags.image, err)
return err
}
// create all of the node containers, concurrently
fns := []func() error{}
for _, desiredNode := range desiredNodes {
desiredNode := desiredNode // capture loop variable
fns = append(fns, func() error {
switch desiredNode.Role {
case constants.ExternalLoadBalancerNodeRoleValue:
return createHelper.CreateExternalLoadBalancer(clusterName, desiredNode.Name)
case constants.ControlPlaneNodeRoleValue, constants.WorkerNodeRoleValue:
return createHelper.CreateNode(clusterName, desiredNode.Name, flags.image, desiredNode.Role, flags.volumes)
default:
return nil
}
})
}
log.Info("Creating nodes...")
if err := untilError(fns); err != nil {
return err
}
// add an external etcd if explicitly requested
if flags.externalEtcd {
log.Info("Getting required etcd image...")
c, err := status.FromDocker(clusterName)
if err != nil {
return err
}
etcdImage, err := c.BootstrapControlPlane().EtcdImage()
if err != nil {
return err
}
// attempt to explicitly pull the etcdImage if it doesn't exist locally
// we don't care if this errors, we'll still try to run which also pulls
_, _ = host.PullImage(etcdImage, 4)
log.Info("Creating external etcd...")
if err := createHelper.CreateExternalEtcd(clusterName, fmt.Sprintf("%s-etcd", clusterName), etcdImage); err != nil {
return err
}
}
// wait for all node containers to have a Running status
log.Info("Waiting for all nodes to start...")
timeout := time.Second * 40
for _, n := range desiredNodes {
var lastErr error
log.Infof("Waiting for node %s to start...", n.Name)
err = wait.PollImmediate(time.Second*1, timeout, func() (bool, error) {
lines, err := exec.NewHostCmd(
"docker",
"container",
"inspect",
"-f",
"'{{.State.Running}}'",
n.Name,
).RunAndCapture()
if err == nil && len(lines) > 0 && lines[0] == `'true'` {
return true, nil
}
lastErr = errors.Errorf("node state is not Running, error: %v, output lines: %+v, ", err, lines)
return false, nil
})
if err != nil {
return errors.Wrapf(lastErr, "node %s did not start in %v", n.Name, timeout)
}
}
// get the cluster
c, err := status.FromDocker(clusterName)
if err != nil {
return err
}
c.Settings = &status.ClusterSettings{
IPFamily: status.IPv4Family, // only IPv4 is tested with kinder
}
// TODO: the cluster and node settings are currently unused by kinder
// Enable these writes if settings have to stored on the nodes
//
// // write to the nodes the cluster settings that will be re-used by kinder during the cluster lifecycle.
//
// if err := c.WriteSettings(); err != nil {
// return err
// }
//
// for _, n := range c.K8sNodes() {
// if err := n.WriteNodeSettings(&status.NodeSettings{}); err != nil {
// return err
// }
// }
return nil
}
// nodeSpec describes a node to create purely from the container aspect
// this does not include eg starting kubernetes (see actions for that)
type nodeSpec struct {
Name string
Role string
}
// nodesToCreate returns the list of nodes to create for the cluster
func nodesToCreate(clusterName string, flags *CreateOptions) []nodeSpec {
var desiredNodes []nodeSpec
// prepare nodes explicitly
for n := 0; n < flags.controlPlanes; n++ {
role := constants.ControlPlaneNodeRoleValue
desiredNode := nodeSpec{ | desiredNodes = append(desiredNodes, desiredNode)
}
for n := 0; n | Name: fmt.Sprintf("%s-%s-%d", clusterName, role, n+1),
Role: role,
} | random_line_split |
create.go | CreateOptions) {
c.retain = retain
}
}
// Volumes option instructs create cluster to add volumes to the node containers
func Volumes(volumes []string) CreateOption {
return func(c *CreateOptions) {
c.volumes = volumes
}
}
// CreateCluster creates a new kinder cluster
func CreateCluster(clusterName string, options ...CreateOption) error {
flags := &CreateOptions{}
for _, o := range options {
o(flags)
}
// Check if the cluster name already exists
known, err := status.IsKnown(clusterName)
if err != nil {
return err
}
if known {
return errors.Errorf("a cluster with the name %q already exists", clusterName)
}
fmt.Printf("Creating cluster %q ...\n", clusterName)
// attempt to explicitly pull the required node image if it doesn't exist locally
// we don't care if this errors, we'll still try to run which also pulls
ensureNodeImage(flags.image)
handleErr := func(err error) error {
// In case of errors nodes are deleted (except if retain is explicitly set)
if !flags.retain {
if c, err := status.FromDocker(clusterName); err != nil {
log.Error(err)
} else {
for _, n := range c.AllNodes() {
if err := exec.NewHostCmd(
"docker",
"rm",
"-f", // force the container to be deleted now
"-v", // delete volumes
n.Name(),
).Run(); err != nil {
return errors.Wrapf(err, "failed to delete node %s", n.Name())
}
}
}
}
log.Error(err)
return err
}
// Create node containers as defined in the kind config
if err := createNodes(
clusterName,
flags,
); err != nil {
return handleErr(err)
}
fmt.Println()
fmt.Printf("Nodes creation complete. You can now continue creating a Kubernetes cluster using\n")
fmt.Printf("kinder do, the kinder swiss knife 🚀!\n")
return nil
}
func createNodes(clusterName string, flags *CreateOptions) error {
// compute the desired nodes, and inform the user that we are setting them up
desiredNodes := nodesToCreate(clusterName, flags)
numberOfNodes := len(desiredNodes)
if flags.externalEtcd {
numberOfNodes++
}
fmt.Printf("Preparing nodes %s\n", strings.Repeat("📦", numberOfNodes))
// detect CRI runtime installed into images before actually creating nodes
runtime, err := status.InspectCRIinImage(flags.image)
if err != nil {
log.Errorf("Error detecting CRI for images %s! %v", flags.image, err)
return err
}
log.Infof("Detected %s container runtime for image %s", runtime, flags.image)
createHelper, err := nodes.NewCreateHelper(runtime)
if err != nil {
log.Errorf("Error creating NewCreateHelper for CRI %s! %v", flags.image, err)
return err
}
// create all of the node containers, concurrently
fns := []func() error{}
for _, desiredNode := range desiredNodes {
desiredNode := desiredNode // capture loop variable
fns = append(fns, func() error {
switch desiredNode.Role {
case constants.ExternalLoadBalancerNodeRoleValue:
return createHelper.CreateExternalLoadBalancer(clusterName, desiredNode.Name)
case constants.ControlPlaneNodeRoleValue, constants.WorkerNodeRoleValue:
return createHelper.CreateNode(clusterName, desiredNode.Name, flags.image, desiredNode.Role, flags.volumes)
default:
return nil
}
})
}
log.Info("Creating nodes...")
if err := untilError(fns); err != nil {
return err
}
// add an external etcd if explicitly requested
if flags.externalEtcd {
log.Info("Getting required etcd image...")
c, err := status.FromDocker(clusterName)
if err != nil {
return err
}
etcdImage, err := c.BootstrapControlPlane().EtcdImage()
if err != nil {
return err
}
// attempt to explicitly pull the etcdImage if it doesn't exist locally
// we don't care if this errors, we'll still try to run which also pulls
_, _ = host.PullImage(etcdImage, 4)
log.Info("Creating external etcd...")
if err := createHelper.CreateExternalEtcd(clusterName, fmt.Sprintf("%s-etcd", clusterName), etcdImage); err != nil {
return err
}
}
// wait for all node containers to have a Running status
log.Info("Waiting for all nodes to start...")
timeout := time.Second * 40
for _, n := range desiredNodes {
var lastErr error
log.Infof("Waiting for node %s to start...", n.Name)
err = wait.PollImmediate(time.Second*1, timeout, func() (bool, error) {
lines, err := exec.NewHostCmd(
"docker",
"container",
"inspect",
"-f",
"'{{.State.Running}}'",
n.Name,
).RunAndCapture()
if err == nil && len(lines) > 0 && lines[0] == `'true'` {
return true, nil
}
lastErr = errors.Errorf("node state is not Running, error: %v, output lines: %+v, ", err, lines)
return false, nil
})
if err != nil {
return errors.Wrapf(lastErr, "node %s did not start in %v", n.Name, timeout)
}
}
// get the cluster
c, err := status.FromDocker(clusterName)
if err != nil {
return err
}
c.Settings = &status.ClusterSettings{
IPFamily: status.IPv4Family, // only IPv4 is tested with kinder
}
// TODO: the cluster and node settings are currently unused by kinder
// Enable these writes if settings have to stored on the nodes
//
// // write to the nodes the cluster settings that will be re-used by kinder during the cluster lifecycle.
//
// if err := c.WriteSettings(); err != nil {
// return err
// }
//
// for _, n := range c.K8sNodes() {
// if err := n.WriteNodeSettings(&status.NodeSettings{}); err != nil {
// return err
// }
// }
return nil
}
// nodeSpec describes a node to create purely from the container aspect
// this does not include eg starting kubernetes (see actions for that)
type nodeSpec struct {
Name string
Role string
}
// nodesToCreate returns the list of nodes to create for the cluster
func nodesToCreate(clusterName string, flags *CreateOptions) []nodeSpec {
var desiredNodes []nodeSpec
// prepare nodes explicitly
for n := 0; n < flags.controlPlanes; n++ {
role := constants.ControlPlaneNodeRoleValue
desiredNode := nodeSpec{
Name: fmt.Sprintf("%s-%s-%d", clusterName, role, n+1),
Role: role,
}
desiredNodes = append(desiredNodes, desiredNode)
}
for n := 0; n < flags.workers; n++ {
role := constants.WorkerNodeRoleValue
desiredNode := nodeSpec{
Name: fmt.Sprintf("%s-%s-%d", clusterName, role, n+1),
Role: role,
}
desiredNodes = append(desiredNodes, desiredNode)
}
// add an external load balancer if explicitly requested or if there are multiple control planes
if flags.externalLoadBalancer || flags.controlPlanes > 1 {
role := constants.ExternalLoadBalancerNodeRoleValue
desiredNodes = append(desiredNodes, nodeSpec{
Name: fmt.Sprintf("%s-lb", clusterName),
Role: role,
})
}
return desiredNodes
}
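// Worked example (illustrative, not in the original source): assuming the role constants
// render as "control-plane" and "worker" in node names, nodesToCreate("demo", flags)
// with controlPlanes=2 and workers=1 yields, in order:
//
//   demo-control-plane-1, demo-control-plane-2   // control plane nodes
//   demo-worker-1                                // worker node
//   demo-lb                                      // load balancer, added because controlPlanes > 1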
// ensureNodeImage ensures that the node image used by the create is present
func ensureNodeImage(image string) {
fmt.Printf("Ensuring node image (%s) 🖼\n", image)
// attempt to explicitly pull the image if it doesn't exist locally
// we don't care if this errors, we'll still try to run which also pulls
_, _ = host.PullImage(image, 4)
}
// UntilError runs all funcs in separate goroutines, returning the
// first non-nil error returned from funcs, or nil if all funcs return nil
// Nb. this func was originally imported from "sigs.k8s.io/kind/pkg/concurrent"; it is still available
// in the kind codebase, but it has been slightly refactored.
func untilError(funcs []func() error) error {
errCh | := make(chan error, len(funcs))
for _, f := range funcs {
f := f // capture f
go func() {
errCh <- f()
}()
}
for i := 0; i < len(funcs); i++ {
if err := <-errCh; err != nil {
return err
}
}
return nil
}
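// Usage sketch for untilError (illustrative, not in the original source): run two closures
// concurrently and return the first failure, if any. doA and doB are hypothetical helpers.
//
//   err := untilError([]func() error{
//       func() error { return doA() },
//       func() error { return doB() },
//   })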
| identifier_body |
|
create.go | 8s.io/kubeadm/kinder/pkg/constants"
"k8s.io/kubeadm/kinder/pkg/cri/host"
"k8s.io/kubeadm/kinder/pkg/cri/nodes"
"k8s.io/kubeadm/kinder/pkg/exec"
)
// CreateOptions holds all the options used at create time
type CreateOptions struct {
controlPlanes int
workers int
image string
externalLoadBalancer bool
externalEtcd bool
retain bool
volumes []string
}
// CreateOption is a configuration option supplied to Create
type CreateOption func(*CreateOptions)
// ControlPlanes sets the number of control plane nodes for create
func ControlPlanes(controlPlanes int) CreateOption {
return func(c *CreateOptions) {
c.controlPlanes = controlPlanes
}
}
// Workers sets the number of worker nodes for create
func Workers(workers int) CreateOption {
return func(c *CreateOptions) {
c.workers = workers
}
}
// Image sets the image for create
func Image(image string) CreateOption {
return func(c *CreateOptions) {
c.image = image
}
}
// ExternalEtcd instructs create to add an external etcd to the cluster
func ExternalEtcd(externalEtcd bool) CreateOption {
return func(c *CreateOptions) {
c.externalEtcd = externalEtcd
}
}
// ExternalLoadBalancer instructs create to add an external loadbalancer to the cluster.
// NB. this happens automatically when there is more than one control plane instance, but with this flag
// it is possible to override the default behaviour
func ExternalLoadBalancer(externalLoadBalancer bool) CreateOption {
return func(c *CreateOptions) {
c.externalLoadBalancer = externalLoadBalancer
}
}
// Retain option instructs create cluster to preserve node in case of errors for debugging purposes
func Retain(retain bool) CreateOption {
return func(c *CreateOptions) {
c.retain = retain
}
}
// Volumes option instructs create cluster to add volumes to the node containers
func | (volumes []string) CreateOption {
return func(c *CreateOptions) {
c.volumes = volumes
}
}
// CreateCluster creates a new kinder cluster
func CreateCluster(clusterName string, options ...CreateOption) error {
flags := &CreateOptions{}
for _, o := range options {
o(flags)
}
// Check if the cluster name already exists
known, err := status.IsKnown(clusterName)
if err != nil {
return err
}
if known {
return errors.Errorf("a cluster with the name %q already exists", clusterName)
}
fmt.Printf("Creating cluster %q ...\n", clusterName)
// attempt to explicitly pull the required node image if it doesn't exist locally
// we don't care if this errors, we'll still try to run which also pulls
ensureNodeImage(flags.image)
handleErr := func(err error) error {
// In case of errors nodes are deleted (except if retain is explicitly set)
if !flags.retain {
if c, err := status.FromDocker(clusterName); err != nil {
log.Error(err)
} else {
for _, n := range c.AllNodes() {
if err := exec.NewHostCmd(
"docker",
"rm",
"-f", // force the container to be deleted now
"-v", // delete volumes
n.Name(),
).Run(); err != nil {
return errors.Wrapf(err, "failed to delete node %s", n.Name())
}
}
}
}
log.Error(err)
return err
}
// Create node containers as defined in the kind config
if err := createNodes(
clusterName,
flags,
); err != nil {
return handleErr(err)
}
fmt.Println()
fmt.Printf("Nodes creation complete. You can now continue creating a Kubernetes cluster using\n")
fmt.Printf("kinder do, the kinder swiss knife 🚀!\n")
return nil
}
func createNodes(clusterName string, flags *CreateOptions) error {
// compute the desired nodes, and inform the user that we are setting them up
desiredNodes := nodesToCreate(clusterName, flags)
numberOfNodes := len(desiredNodes)
if flags.externalEtcd {
numberOfNodes++
}
fmt.Printf("Preparing nodes %s\n", strings.Repeat("📦", numberOfNodes))
// detect CRI runtime installed into images before actually creating nodes
runtime, err := status.InspectCRIinImage(flags.image)
if err != nil {
log.Errorf("Error detecting CRI for images %s! %v", flags.image, err)
return err
}
log.Infof("Detected %s container runtime for image %s", runtime, flags.image)
createHelper, err := nodes.NewCreateHelper(runtime)
if err != nil {
log.Errorf("Error creating NewCreateHelper for CRI %s! %v", flags.image, err)
return err
}
// create all of the node containers, concurrently
fns := []func() error{}
for _, desiredNode := range desiredNodes {
desiredNode := desiredNode // capture loop variable
fns = append(fns, func() error {
switch desiredNode.Role {
case constants.ExternalLoadBalancerNodeRoleValue:
return createHelper.CreateExternalLoadBalancer(clusterName, desiredNode.Name)
case constants.ControlPlaneNodeRoleValue, constants.WorkerNodeRoleValue:
return createHelper.CreateNode(clusterName, desiredNode.Name, flags.image, desiredNode.Role, flags.volumes)
default:
return nil
}
})
}
log.Info("Creating nodes...")
if err := untilError(fns); err != nil {
return err
}
// add an external etcd if explicitly requested
if flags.externalEtcd {
log.Info("Getting required etcd image...")
c, err := status.FromDocker(clusterName)
if err != nil {
return err
}
etcdImage, err := c.BootstrapControlPlane().EtcdImage()
if err != nil {
return err
}
// attempt to explicitly pull the etcdImage if it doesn't exist locally
// we don't care if this errors, we'll still try to run which also pulls
_, _ = host.PullImage(etcdImage, 4)
log.Info("Creating external etcd...")
if err := createHelper.CreateExternalEtcd(clusterName, fmt.Sprintf("%s-etcd", clusterName), etcdImage); err != nil {
return err
}
}
// wait for all node containers to have a Running status
log.Info("Waiting for all nodes to start...")
timeout := time.Second * 40
for _, n := range desiredNodes {
var lastErr error
log.Infof("Waiting for node %s to start...", n.Name)
err = wait.PollImmediate(time.Second*1, timeout, func() (bool, error) {
lines, err := exec.NewHostCmd(
"docker",
"container",
"inspect",
"-f",
"'{{.State.Running}}'",
n.Name,
).RunAndCapture()
if err == nil && len(lines) > 0 && lines[0] == `'true'` {
return true, nil
}
lastErr = errors.Errorf("node state is not Running, error: %v, output lines: %+v, ", err, lines)
return false, nil
})
if err != nil {
return errors.Wrapf(lastErr, "node %s did not start in %v", n.Name, timeout)
}
}
// get the cluster
c, err := status.FromDocker(clusterName)
if err != nil {
return err
}
c.Settings = &status.ClusterSettings{
IPFamily: status.IPv4Family, // only IPv4 is tested with kinder
}
// TODO: the cluster and node settings are currently unused by kinder
// Enable these writes if settings have to stored on the nodes
//
// // write to the nodes the cluster settings that will be re-used by kinder during the cluster lifecycle.
//
// if err := c.WriteSettings(); err != nil {
// return err
// }
//
// for _, n := range c.K8sNodes() {
// if err := n.WriteNodeSettings(&status.NodeSettings{}); err != nil {
// return err
// }
// }
return nil
}
// nodeSpec describes a node to create purely from the container aspect
// this does not include eg starting kubernetes (see actions for that)
type nodeSpec struct {
Name string
Role string
}
// nodesToCreate returns the list of nodes to create for the cluster
func nodesToCreate(clusterName string, flags *CreateOptions) []nodeSpec {
var desiredNodes []nodeSpec
// prepare nodes explicitly
for n := 0; n < flags.controlPlanes; n++ {
role := constants.ControlPlaneNodeRoleValue
desiredNode := nodeSpec{
Name: fmt.Sprintf("%s-%s-%d", clusterName, role, n+1),
Role: role,
}
desiredNodes = append(desiredNodes, desiredNode)
}
for n := 0; n | Volumes | identifier_name |
create.go | 8s.io/kubeadm/kinder/pkg/constants"
"k8s.io/kubeadm/kinder/pkg/cri/host"
"k8s.io/kubeadm/kinder/pkg/cri/nodes"
"k8s.io/kubeadm/kinder/pkg/exec"
)
// CreateOptions holds all the options used at create time
type CreateOptions struct {
controlPlanes int
workers int
image string
externalLoadBalancer bool
externalEtcd bool
retain bool
volumes []string
}
// CreateOption is a configuration option supplied to Create
type CreateOption func(*CreateOptions)
// ControlPlanes sets the number of control plane nodes for create
func ControlPlanes(controlPlanes int) CreateOption {
return func(c *CreateOptions) {
c.controlPlanes = controlPlanes
}
}
// Workers sets the number of worker nodes for create
func Workers(workers int) CreateOption {
return func(c *CreateOptions) {
c.workers = workers
}
}
// Image sets the image for create
func Image(image string) CreateOption {
return func(c *CreateOptions) {
c.image = image
}
}
// ExternalEtcd instructs create to add an external etcd to the cluster
func ExternalEtcd(externalEtcd bool) CreateOption {
return func(c *CreateOptions) {
c.externalEtcd = externalEtcd
}
}
// ExternalLoadBalancer instructs create to add an external loadbalancer to the cluster.
// NB. this happens automatically when there is more than one control plane instance, but with this flag
// it is possible to override the default behaviour
func ExternalLoadBalancer(externalLoadBalancer bool) CreateOption {
return func(c *CreateOptions) {
c.externalLoadBalancer = externalLoadBalancer
}
}
// Retain option instructs create cluster to preserve node in case of errors for debugging purposes
func Retain(retain bool) CreateOption {
return func(c *CreateOptions) {
c.retain = retain
}
}
// Volumes option instructs create cluster to add volumes to the node containers
func Volumes(volumes []string) CreateOption {
return func(c *CreateOptions) {
c.volumes = volumes
}
}
// CreateCluster creates a new kinder cluster
func CreateCluster(clusterName string, options ...CreateOption) error {
flags := &CreateOptions{}
for _, o := range options {
o(flags)
}
// Check if the cluster name already exists
known, err := status.IsKnown(clusterName)
if err != nil {
return err
}
if known {
return errors.Errorf("a cluster with the name %q already exists", clusterName)
}
fmt.Printf("Creating cluster %q ...\n", clusterName)
// attempt to explicitly pull the required node image if it doesn't exist locally
// we don't care if this errors, we'll still try to run which also pulls
ensureNodeImage(flags.image)
handleErr := func(err error) error {
// In case of errors nodes are deleted (except if retain is explicitly set)
if !flags.retain {
if c, err := status.FromDocker(clusterName); err != nil {
log.Error(err)
} else {
for _, n := range c.AllNodes() {
if err := exec.NewHostCmd(
"docker",
"rm",
"-f", // force the container to be deleted now
"-v", // delete volumes
n.Name(),
).Run(); err != nil {
return errors.Wrapf(err, "failed to delete node %s", n.Name())
}
}
}
}
log.Error(err)
return err
}
// Create node containers as defined in the kind config
if err := createNodes(
clusterName,
flags,
); err != nil {
return handleErr(err)
}
fmt.Println()
fmt.Printf("Nodes creation complete. You can now continue creating a Kubernetes cluster using\n")
fmt.Printf("kinder do, the kinder swiss knife 🚀!\n")
return nil
}
func createNodes(clusterName string, flags *CreateOptions) error {
// compute the desired nodes, and inform the user that we are setting them up
desiredNodes := nodesToCreate(clusterName, flags)
numberOfNodes := len(desiredNodes)
if flags.externalEtcd {
| mt.Printf("Preparing nodes %s\n", strings.Repeat("📦", numberOfNodes))
// detect CRI runtime installed into images before actually creating nodes
runtime, err := status.InspectCRIinImage(flags.image)
if err != nil {
log.Errorf("Error detecting CRI for images %s! %v", flags.image, err)
return err
}
log.Infof("Detected %s container runtime for image %s", runtime, flags.image)
createHelper, err := nodes.NewCreateHelper(runtime)
if err != nil {
log.Errorf("Error creating NewCreateHelper for CRI %s! %v", flags.image, err)
return err
}
// create all of the node containers, concurrently
fns := []func() error{}
for _, desiredNode := range desiredNodes {
desiredNode := desiredNode // capture loop variable
fns = append(fns, func() error {
switch desiredNode.Role {
case constants.ExternalLoadBalancerNodeRoleValue:
return createHelper.CreateExternalLoadBalancer(clusterName, desiredNode.Name)
case constants.ControlPlaneNodeRoleValue, constants.WorkerNodeRoleValue:
return createHelper.CreateNode(clusterName, desiredNode.Name, flags.image, desiredNode.Role, flags.volumes)
default:
return nil
}
})
}
log.Info("Creating nodes...")
if err := untilError(fns); err != nil {
return err
}
// add an external etcd if explicitly requested
if flags.externalEtcd {
log.Info("Getting required etcd image...")
c, err := status.FromDocker(clusterName)
if err != nil {
return err
}
etcdImage, err := c.BootstrapControlPlane().EtcdImage()
if err != nil {
return err
}
// attempt to explicitly pull the etcdImage if it doesn't exist locally
// we don't care if this errors, we'll still try to run which also pulls
_, _ = host.PullImage(etcdImage, 4)
log.Info("Creating external etcd...")
if err := createHelper.CreateExternalEtcd(clusterName, fmt.Sprintf("%s-etcd", clusterName), etcdImage); err != nil {
return err
}
}
// wait for all node containers to have a Running status
log.Info("Waiting for all nodes to start...")
timeout := time.Second * 40
for _, n := range desiredNodes {
var lastErr error
log.Infof("Waiting for node %s to start...", n.Name)
err = wait.PollImmediate(time.Second*1, timeout, func() (bool, error) {
lines, err := exec.NewHostCmd(
"docker",
"container",
"inspect",
"-f",
"'{{.State.Running}}'",
n.Name,
).RunAndCapture()
if err == nil && len(lines) > 0 && lines[0] == `'true'` {
return true, nil
}
lastErr = errors.Errorf("node state is not Running, error: %v, output lines: %+v, ", err, lines)
return false, nil
})
if err != nil {
return errors.Wrapf(lastErr, "node %s did not start in %v", n.Name, timeout)
}
}
// get the cluster
c, err := status.FromDocker(clusterName)
if err != nil {
return err
}
c.Settings = &status.ClusterSettings{
IPFamily: status.IPv4Family, // only IPv4 is tested with kinder
}
// TODO: the cluster and node settings are currently unused by kinder
// Enable these writes if settings have to stored on the nodes
//
// // write to the nodes the cluster settings that will be re-used by kinder during the cluster lifecycle.
//
// if err := c.WriteSettings(); err != nil {
// return err
// }
//
// for _, n := range c.K8sNodes() {
// if err := n.WriteNodeSettings(&status.NodeSettings{}); err != nil {
// return err
// }
// }
return nil
}
// nodeSpec describes a node to create purely from the container aspect
// this does not include eg starting kubernetes (see actions for that)
type nodeSpec struct {
Name string
Role string
}
// nodesToCreate returns the list of nodes to create for the cluster
func nodesToCreate(clusterName string, flags *CreateOptions) []nodeSpec {
var desiredNodes []nodeSpec
// prepare nodes explicitly
for n := 0; n < flags.controlPlanes; n++ {
role := constants.ControlPlaneNodeRoleValue
desiredNode := nodeSpec{
Name: fmt.Sprintf("%s-%s-%d", clusterName, role, n+1),
Role: role,
}
desiredNodes = append(desiredNodes, desiredNode)
}
for n := 0; | numberOfNodes++
}
f | conditional_block |
command_server.rs | worker that is registered interest to its Uuid
fn send_messages(res: WrappedResponse, al: &Mutex<AlertList>) {
let mut al_inner = al.lock().expect("Unable to lock al in send_messages");
let pos_opt: Option<&mut (_, UnboundedSender<Result<Response, ()>>)> = al_inner.list.iter_mut().find(|x| x.0 == res.uuid );
if pos_opt.is_some() {
pos_opt.unwrap().1.send( Ok(res.res) ).expect("Unable to send through subscribed future");
}
}
/// Utility struct for keeping track of the UUIDs of Responses that workers are
/// interested in and holding Completes to let them know when they are received
impl AlertList {
pub fn new() -> AlertList {
AlertList {
list: Vec::new(),
}
}
/// Register interest in Results with a specified Uuid and send
/// the Result over the specified Oneshot when it's received
pub fn register(&mut self, response_uuid: &Uuid, c: UnboundedSender<Result<Response, ()>>) {
self.list.push((*response_uuid, c));
}
/// Deregisters a listener in the case of a timeout occurring
pub fn deregister(&mut self, uuid: &Uuid) {
let pos_opt = self.list.iter().position(|x| &x.0 == uuid );
match pos_opt {
Some(pos) => { self.list.remove(pos); },
None => println!("Error deregistering element from interest list; it's not in it"),
}
}
}
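// Illustrative sketch, not part of the original file: how a caller might use AlertList
// together with the unbounded channel type used elsewhere in this module. `alert_list`
// and `cmd_uuid` are hypothetical names; the flow mirrors send_command_outer below.
//
//   let (tx, rx) = unbounded::<Result<Response, ()>>();
//   alert_list.lock().unwrap().register(&cmd_uuid, tx);
//   // ... once `send_messages` forwards the matching Response, the receiver resolves:
//   let _res = rx.into_future().wait();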
#[derive(Clone)]
pub struct CommandServer {
al: Arc<Mutex<AlertList>>,
command_queue: CommandQueue, // internal command queue
conn_queue: UnboundedSenderQueue, // UnboundedSenders for idle command-UnboundedSender threads
client: redis::Client,
instance: Instance, // The instance that owns this CommandServer
}
/// Locks the `CommandQueue` and returns a queued command, if there are any.
fn try_get_new_command(command_queue: CommandQueue) -> Option<CommandRequest> {
let mut qq_inner = command_queue.lock()
.expect("Unable to unlock qq_inner in try_get_new_command");
qq_inner.pop_front()
}
fn send_command_outer(
al: &Mutex<AlertList>, command: &Command, client: &mut redis::Client,
mut sleeper_tx: &mut UnboundedSender<TimeoutRequest>, res_c: Sender<Result<Response, String>>,
command_queue: CommandQueue, mut attempts: usize, commands_channel: String
) {
let wr_cmd = command.wrap();
let _ = send_command(&wr_cmd, client, commands_channel.as_str());
let (sleepy_c, sleepy_o) = oneshot::<Thread>();
let (awake_c, awake_o) = oneshot::<Result<Response, ()>>();
// start the timeout timer on a separate thread
let dur = Duration::from_millis(CONF.cs_timeout as u64);
let timeout_msg = TimeoutRequest {
dur: dur,
thread_future: sleepy_c,
timeout_future: awake_c
};
sleeper_tx.send(timeout_msg).unwrap();
// sleepy_o fulfills immediately to a handle to the sleeper thread
let sleepy_handle = sleepy_o.wait();
// UnboundedSender for giving to the AlertList and sending the response back
let (res_recvd_c, res_recvd_o) = unbounded::<Result<Response, ()>>();
// register interest in new Responses coming in with our Command's Uuid
{
al.lock().expect("Unlock to lock al in send_command_outer #1")
.register(&wr_cmd.uuid, res_recvd_c);
}
res_recvd_o.into_future().map(|(item_opt, _)| {
item_opt.expect("item_opt was None")
}).map_err(|_| Canceled ).select(awake_o).and_then(move |res| {
let (status, _) = res;
match status {
Ok(wrapped_res) => { // command received
{
// deregister since we're only waiting on one message
al.lock().expect("Unlock to lock al in send_command_outer #2")
.deregister(&wr_cmd.uuid);
}
// end the timeout now so that we can re-use sleeper thread
sleepy_handle.expect("Couldn't unwrap handle to sleeper thread").unpark();
// resolve the Response future
res_c.complete(Ok(wrapped_res));
return Ok(sleeper_tx)
},
Err(_) => { // timed out
{
al.lock().expect("Couldn't lock al in Err(_)")
.deregister(&wr_cmd.uuid);
}
attempts += 1;
if attempts >= CONF.cs_max_retries {
// Let the main thread know it's safe to use the UnboundedSender again
// This essentially indicates that the worker thread is idle
let err_msg = String::from_str("Timed out too many times!").unwrap();
res_c.complete(Err(err_msg));
return Ok(sleeper_tx)
} else { // re-send the command
// we can do this recursively since it's only a few retries
send_command_outer(al, &wr_cmd.cmd, client, sleeper_tx, res_c,
command_queue, attempts, commands_channel)
}
}
}
Ok(sleeper_tx)
}).wait().ok().unwrap(); // block until a response is received or the command times out
}
/// Manually loop over the converted Stream of commands
fn dispatch_worker(
work: WorkerTask, al: &Mutex<AlertList>, mut client: &mut redis::Client,
mut sleeper_tx: &mut UnboundedSender<TimeoutRequest>, command_queue: CommandQueue
) -> Option<()> {
let (cr, idle_c) = work;
// completes initial command and internally iterates until queue is empty
send_command_outer(al, &cr.cmd, &mut client, sleeper_tx, cr.future, command_queue.clone(), 0, cr.channel);
// keep trying to get queued commands to execute until the queue is empty;
while let Some(cr) = try_get_new_command(command_queue.clone()) {
send_command_outer(al, &cr.cmd, client, &mut sleeper_tx, cr.future, command_queue.clone(), 0, cr.channel);
}
idle_c.complete(());
Some(())
}
/// Blocks the current thread until a Duration+Complete is received.
/// Then it sleeps for that Duration and Completes the oneshot upon awakening.
/// Returns a Complete upon starting that can be used to end the timeout early
fn init_sleeper(rx: UnboundedReceiver<TimeoutRequest>,) {
for res in rx.wait() {
match res.unwrap() {
TimeoutRequest{dur, thread_future, timeout_future} => {
// send a Complete with a handle to the thread
thread_future.complete(thread::current());
thread::park_timeout(dur);
timeout_future.complete(Err(()));
}
}
}
}
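// Illustrative sketch, not part of the original file: how send_command_outer above asks the
// sleeper thread for a timeout. All names mirror constructs already used in this module.
//
//   let (sleepy_c, sleepy_o) = oneshot::<Thread>();
//   let (awake_c, _awake_o) = oneshot::<Result<Response, ()>>();
//   sleeper_tx.send(TimeoutRequest {
//       dur: Duration::from_millis(CONF.cs_timeout as u64),
//       thread_future: sleepy_c,
//       timeout_future: awake_c,
//   }).unwrap();
//   // `sleepy_o` resolves to a handle to the sleeper thread; unpark() it to cancel the timeout early.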
/// Creates a command processor that awaits requests
fn init_command_processor(
cmd_rx: UnboundedReceiver<WorkerTask>, command_queue: CommandQueue, al: &Mutex<AlertList>
) {
let mut client = get_client(CONF.redis_host);
// channel for communicating with the sleeper thread
let (mut sleeper_tx, sleeper_rx) = unbounded::<TimeoutRequest>();
thread::spawn(move || init_sleeper(sleeper_rx) );
for task in cmd_rx.wait() {
let res = dispatch_worker(
task.unwrap(), al, &mut client, &mut sleeper_tx, command_queue.clone()
);
// exit if we're in the process of collapse
if res.is_none() {
break;
}
}
}
impl CommandServer {
pub fn new(instance_uuid: Uuid, instance_type: &str) -> CommandServer {
let mut conn_queue = VecDeque::with_capacity(CONF.conn_senders);
let command_queue = Arc::new(Mutex::new(VecDeque::new()));
let al = Arc::new(Mutex::new(AlertList::new()));
let al_clone = al.clone();
// Handle newly received Responses
let rx = sub_channel(CONF.redis_host, CONF.redis_responses_channel);
thread::spawn(move || {
for raw_res_res in rx.wait() {
let raw_res = raw_res_res.expect("Res was error in CommandServer response UnboundedReceiver thread.");
let parsed_res = parse_wrapped_response(raw_res);
send_messages(parsed_res, &*al_clone);
}
});
for _ in 0..CONF.conn_senders {
let al_clone = al.clone();
let qq_copy = command_queue.clone();
// channel for getting the UnboundedSender back from the worker thread
let (tx, rx) = unbounded::<WorkerTask>();
thread::spawn(move || init_command_processor(rx, qq_copy, &*al_clone) );
// store the UnboundedSender which can be used to send queries
// to the worker in the connection queue
conn_queue.push_back(tx);
}
let client = get_client(CONF.redis_host);
CommandServer {
al: al,
command_queue: command_queue,
conn_queue: Arc::new(Mutex::new(conn_queue)),
client: client,
instance: Instance{ uuid: instance_uuid, instance_type: String::from(instance_type), },
}
}
/// Queues up a command to be sent. Returns a future that resolves to
/// the returned response.
pub fn | execute | identifier_name |
|
command_server.rs | Results from the Tick Processor will be sent if they
/// match the ID of the request the command `UnboundedSender` thread sent.
struct AlertList {
// Vec to hold the ids of responses we're waiting for and `Sender`s
// to send the result back to the worker thread
// Wrapped in Arc<Mutex<>> so that it can be accessed from within futures
pub list: RegisteredList,
}
/// Send out the Response to a worker that has registered interest in its Uuid
fn send_messages(res: WrappedResponse, al: &Mutex<AlertList>) {
let mut al_inner = al.lock().expect("Unable to lock al in send_messages");
let pos_opt: Option<&mut (_, UnboundedSender<Result<Response, ()>>)> = al_inner.list.iter_mut().find(|x| x.0 == res.uuid );
if pos_opt.is_some() {
pos_opt.unwrap().1.send( Ok(res.res) ).expect("Unable to send through subscribed future");
}
}
/// Utility struct for keeping track of the UUIDs of Responses that workers are
/// interested in and holding Completes to let them know when they are received
impl AlertList {
pub fn new() -> AlertList {
AlertList {
list: Vec::new(),
}
}
/// Register interest in Results with a specified Uuid and send
/// the Result over the specified Oneshot when it's received
pub fn register(&mut self, response_uuid: &Uuid, c: UnboundedSender<Result<Response, ()>>) {
self.list.push((*response_uuid, c));
}
/// Deregisters a listener in the case of a timeout occurring
pub fn deregister(&mut self, uuid: &Uuid) {
let pos_opt = self.list.iter().position(|x| &x.0 == uuid );
match pos_opt {
Some(pos) => { self.list.remove(pos); },
None => println!("Error deregistering element from interest list; it's not in it"),
}
}
}
#[derive(Clone)]
pub struct CommandServer {
al: Arc<Mutex<AlertList>>,
command_queue: CommandQueue, // internal command queue
conn_queue: UnboundedSenderQueue, // UnboundedSenders for idle command-UnboundedSender threads
client: redis::Client,
instance: Instance, // The instance that owns this CommandServer
}
/// Locks the `CommandQueue` and returns a queued command, if there are any.
fn try_get_new_command(command_queue: CommandQueue) -> Option<CommandRequest> {
let mut qq_inner = command_queue.lock()
.expect("Unable to unlock qq_inner in try_get_new_command");
qq_inner.pop_front()
}
fn send_command_outer(
al: &Mutex<AlertList>, command: &Command, client: &mut redis::Client,
mut sleeper_tx: &mut UnboundedSender<TimeoutRequest>, res_c: Sender<Result<Response, String>>,
command_queue: CommandQueue, mut attempts: usize, commands_channel: String
) {
let wr_cmd = command.wrap();
let _ = send_command(&wr_cmd, client, commands_channel.as_str());
let (sleepy_c, sleepy_o) = oneshot::<Thread>();
let (awake_c, awake_o) = oneshot::<Result<Response, ()>>();
// start the timeout timer on a separate thread
let dur = Duration::from_millis(CONF.cs_timeout as u64);
let timeout_msg = TimeoutRequest {
dur: dur,
thread_future: sleepy_c,
timeout_future: awake_c
};
sleeper_tx.send(timeout_msg).unwrap();
// sleepy_o fulfills immediately to a handle to the sleeper thread
let sleepy_handle = sleepy_o.wait();
// UnboundedSender for giving to the AlertList and sending the response back
let (res_recvd_c, res_recvd_o) = unbounded::<Result<Response, ()>>();
// register interest in new Responses coming in with our Command's Uuid
{
al.lock().expect("Unlock to lock al in send_command_outer #1")
.register(&wr_cmd.uuid, res_recvd_c);
}
res_recvd_o.into_future().map(|(item_opt, _)| {
item_opt.expect("item_opt was None")
}).map_err(|_| Canceled ).select(awake_o).and_then(move |res| {
let (status, _) = res;
match status {
Ok(wrapped_res) => { // command received
{
// deregister since we're only waiting on one message
al.lock().expect("Unlock to lock al in send_command_outer #2")
.deregister(&wr_cmd.uuid);
}
// end the timeout now so that we can re-use sleeper thread
sleepy_handle.expect("Couldn't unwrap handle to sleeper thread").unpark();
// resolve the Response future
res_c.complete(Ok(wrapped_res));
return Ok(sleeper_tx)
},
Err(_) => { // timed out
{
al.lock().expect("Couldn't lock al in Err(_)")
.deregister(&wr_cmd.uuid);
}
attempts += 1;
if attempts >= CONF.cs_max_retries {
// Let the main thread know it's safe to use the UnboundedSender again
// This essentially indicates that the worker thread is idle
let err_msg = String::from_str("Timed out too many times!").unwrap();
res_c.complete(Err(err_msg));
return Ok(sleeper_tx)
} else { // re-send the command
// we can do this recursively since it's only a few retries
send_command_outer(al, &wr_cmd.cmd, client, sleeper_tx, res_c,
command_queue, attempts, commands_channel)
}
}
}
Ok(sleeper_tx)
}).wait().ok().unwrap(); // block until a response is received or the command times out
}
/// Manually loop over the converted Stream of commands
fn dispatch_worker(
work: WorkerTask, al: &Mutex<AlertList>, mut client: &mut redis::Client,
mut sleeper_tx: &mut UnboundedSender<TimeoutRequest>, command_queue: CommandQueue
) -> Option<()> {
let (cr, idle_c) = work;
// completes initial command and internally iterates until queue is empty
send_command_outer(al, &cr.cmd, &mut client, sleeper_tx, cr.future, command_queue.clone(), 0, cr.channel);
// keep trying to get queued commands to execute until the queue is empty;
while let Some(cr) = try_get_new_command(command_queue.clone()) {
send_command_outer(al, &cr.cmd, client, &mut sleeper_tx, cr.future, command_queue.clone(), 0, cr.channel);
}
idle_c.complete(());
Some(())
}
/// Blocks the current thread until a Duration+Complete is received.
/// Then it sleeps for that Duration and Completes the oneshot upon awakening.
/// Returns a Complete upon starting that can be used to end the timeout early
fn init_sleeper(rx: UnboundedReceiver<TimeoutRequest>,) {
for res in rx.wait() {
match res.unwrap() {
TimeoutRequest{dur, thread_future, timeout_future} => {
// send a Complete with a handle to the thread
thread_future.complete(thread::current());
thread::park_timeout(dur);
timeout_future.complete(Err(()));
}
}
}
}
/// Creates a command processor that awaits requests
fn init_command_processor(
cmd_rx: UnboundedReceiver<WorkerTask>, command_queue: CommandQueue, al: &Mutex<AlertList>
) {
let mut client = get_client(CONF.redis_host);
// channel for communicating with the sleeper thread
let (mut sleeper_tx, sleeper_rx) = unbounded::<TimeoutRequest>();
thread::spawn(move || init_sleeper(sleeper_rx) ); | for task in cmd_rx.wait() {
let res = dispatch_worker(
task.unwrap(), al, &mut client, &mut sleeper_tx, command_queue.clone()
);
// exit if we're in the process of collapse
if res.is_none() {
break;
}
}
}
impl CommandServer {
pub fn new(instance_uuid: Uuid, instance_type: &str) -> CommandServer {
let mut conn_queue = VecDeque::with_capacity(CONF.conn_senders);
let command_queue = Arc::new(Mutex::new(VecDeque::new()));
let al = Arc::new(Mutex::new(AlertList::new()));
let al_clone = al.clone();
// Handle newly received Responses
let rx = sub_channel(CONF.redis_host, CONF.redis_responses_channel);
thread::spawn(move || {
for raw_res_res in rx.wait() {
let raw_res = raw_res_res.expect("Res was error in CommandServer response UnboundedReceiver thread.");
let parsed_res = parse_wrapped_response(raw_res);
send_messages(parsed_res, &*al_clone);
}
});
for _ in 0..CONF.conn_senders {
let al_clone = al.clone();
let qq_copy = command_queue.clone();
// channel for getting the UnboundedSender back from the worker thread
let (tx, rx) = unbounded::<WorkerTask>();
thread::spawn(move || init_command_processor(rx, qq_copy, &*al_clone) );
// store the UnboundedSender which can be used to send queries
// to the worker in the connection queue
conn_queue.push_back(tx);
}
let client = get_client | random_line_split |
|
command_server.rs | from the Tick Processor will be sent if they
/// match the ID of the request the command `UnboundedSender` thread sent.
struct AlertList {
// Vec to hold the ids of responses we're waiting for and `Sender`s
// to send the result back to the worker thread
// Wrapped in Arc<Mutex<>> so that it can be accessed from within futures
pub list: RegisteredList,
}
/// Send out the Response to a worker that has registered interest in its Uuid
fn send_messages(res: WrappedResponse, al: &Mutex<AlertList>) {
let mut al_inner = al.lock().expect("Unable to lock al in send_messages");
let pos_opt: Option<&mut (_, UnboundedSender<Result<Response, ()>>)> = al_inner.list.iter_mut().find(|x| x.0 == res.uuid );
if pos_opt.is_some() {
pos_opt.unwrap().1.send( Ok(res.res) ).expect("Unable to send through subscribed future");
}
}
/// Utility struct for keeping track of the UUIDs of Responses that workers are
/// interested in and holding Completes to let them know when they are received
impl AlertList {
pub fn new() -> AlertList {
AlertList {
list: Vec::new(),
}
}
/// Register interest in Results with a specified Uuid and send
/// the Result over the specified Oneshot when it's received
pub fn register(&mut self, response_uuid: &Uuid, c: UnboundedSender<Result<Response, ()>>) {
self.list.push((*response_uuid, c));
}
/// Deregisters a listener in the case of a timeout occurring
pub fn deregister(&mut self, uuid: &Uuid) {
let pos_opt = self.list.iter().position(|x| &x.0 == uuid );
match pos_opt {
Some(pos) => { self.list.remove(pos); },
None => println!("Error deregistering element from interest list; it's not in it"),
}
}
}
#[derive(Clone)]
pub struct CommandServer {
al: Arc<Mutex<AlertList>>,
command_queue: CommandQueue, // internal command queue
conn_queue: UnboundedSenderQueue, // UnboundedSenders for idle command-UnboundedSender threads
client: redis::Client,
instance: Instance, // The instance that owns this CommandServer
}
/// Locks the `CommandQueue` and returns a queued command, if there are any.
fn try_get_new_command(command_queue: CommandQueue) -> Option<CommandRequest> {
let mut qq_inner = command_queue.lock()
.expect("Unable to unlock qq_inner in try_get_new_command");
qq_inner.pop_front()
}
fn send_command_outer(
al: &Mutex<AlertList>, command: &Command, client: &mut redis::Client,
mut sleeper_tx: &mut UnboundedSender<TimeoutRequest>, res_c: Sender<Result<Response, String>>,
command_queue: CommandQueue, mut attempts: usize, commands_channel: String
) {
let wr_cmd = command.wrap();
let _ = send_command(&wr_cmd, client, commands_channel.as_str());
let (sleepy_c, sleepy_o) = oneshot::<Thread>();
let (awake_c, awake_o) = oneshot::<Result<Response, ()>>();
// start the timeout timer on a separate thread
let dur = Duration::from_millis(CONF.cs_timeout as u64);
let timeout_msg = TimeoutRequest {
dur: dur,
thread_future: sleepy_c,
timeout_future: awake_c
};
sleeper_tx.send(timeout_msg).unwrap();
// sleepy_o fulfills immediately to a handle to the sleeper thread
let sleepy_handle = sleepy_o.wait();
// UnboundedSender for giving to the AlertList and sending the response back
let (res_recvd_c, res_recvd_o) = unbounded::<Result<Response, ()>>();
// register interest in new Responses coming in with our Command's Uuid
{
al.lock().expect("Unlock to lock al in send_command_outer #1")
.register(&wr_cmd.uuid, res_recvd_c);
}
res_recvd_o.into_future().map(|(item_opt, _)| {
item_opt.expect("item_opt was None")
}).map_err(|_| Canceled ).select(awake_o).and_then(move |res| {
let (status, _) = res;
match status {
Ok(wrapped_res) => { // command received
{
// deregister since we're only waiting on one message
al.lock().expect("Unlock to lock al in send_command_outer #2")
.deregister(&wr_cmd.uuid);
}
// end the timeout now so that we can re-use sleeper thread
sleepy_handle.expect("Couldn't unwrap handle to sleeper thread").unpark();
// resolve the Response future
res_c.complete(Ok(wrapped_res));
return Ok(sleeper_tx)
},
Err(_) => |
}
Ok(sleeper_tx)
}).wait().ok().unwrap(); // block until a response is received or the command times out
}
/// Manually loop over the converted Stream of commands
fn dispatch_worker(
work: WorkerTask, al: &Mutex<AlertList>, mut client: &mut redis::Client,
mut sleeper_tx: &mut UnboundedSender<TimeoutRequest>, command_queue: CommandQueue
) -> Option<()> {
let (cr, idle_c) = work;
// completes initial command and internally iterates until queue is empty
send_command_outer(al, &cr.cmd, &mut client, sleeper_tx, cr.future, command_queue.clone(), 0, cr.channel);
// keep trying to get queued commands to execute until the queue is empty;
while let Some(cr) = try_get_new_command(command_queue.clone()) {
send_command_outer(al, &cr.cmd, client, &mut sleeper_tx, cr.future, command_queue.clone(), 0, cr.channel);
}
idle_c.complete(());
Some(())
}
/// Blocks the current thread until a Duration+Complete is received.
/// Then it sleeps for that Duration and Completes the oneshot upon awakening.
/// Returns a Complete upon starting that can be used to end the timeout early
fn init_sleeper(rx: UnboundedReceiver<TimeoutRequest>,) {
for res in rx.wait() {
match res.unwrap() {
TimeoutRequest{dur, thread_future, timeout_future} => {
// send a Complete with a handle to the thread
thread_future.complete(thread::current());
thread::park_timeout(dur);
timeout_future.complete(Err(()));
}
}
}
}
/// Creates a command processor that awaits requests
fn init_command_processor(
cmd_rx: UnboundedReceiver<WorkerTask>, command_queue: CommandQueue, al: &Mutex<AlertList>
) {
let mut client = get_client(CONF.redis_host);
// channel for communicating with the sleeper thread
let (mut sleeper_tx, sleeper_rx) = unbounded::<TimeoutRequest>();
thread::spawn(move || init_sleeper(sleeper_rx) );
for task in cmd_rx.wait() {
let res = dispatch_worker(
task.unwrap(), al, &mut client, &mut sleeper_tx, command_queue.clone()
);
// exit if we're in the process of collapse
if res.is_none() {
break;
}
}
}
impl CommandServer {
pub fn new(instance_uuid: Uuid, instance_type: &str) -> CommandServer {
let mut conn_queue = VecDeque::with_capacity(CONF.conn_senders);
let command_queue = Arc::new(Mutex::new(VecDeque::new()));
let al = Arc::new(Mutex::new(AlertList::new()));
let al_clone = al.clone();
// Handle newly received Responses
let rx = sub_channel(CONF.redis_host, CONF.redis_responses_channel);
thread::spawn(move || {
for raw_res_res in rx.wait() {
let raw_res = raw_res_res.expect("Res was error in CommandServer response UnboundedReceiver thread.");
let parsed_res = parse_wrapped_response(raw_res);
send_messages(parsed_res, &*al_clone);
}
});
for _ in 0..CONF.conn_senders {
let al_clone = al.clone();
let qq_copy = command_queue.clone();
// channel for getting the UnboundedSender back from the worker thread
let (tx, rx) = unbounded::<WorkerTask>();
thread::spawn(move || init_command_processor(rx, qq_copy, &*al_clone) );
// store the UnboundedSender which can be used to send queries
// to the worker in the connection queue
conn_queue.push_back(tx);
}
let client = get | { // timed out
{
al.lock().expect("Couldn't lock al in Err(_)")
.deregister(&wr_cmd.uuid);
}
attempts += 1;
if attempts >= CONF.cs_max_retries {
// Let the main thread know it's safe to use the UnboundedSender again
// This essentially indicates that the worker thread is idle
let err_msg = String::from_str("Timed out too many times!").unwrap();
res_c.complete(Err(err_msg));
return Ok(sleeper_tx)
} else { // re-send the command
// we can do this recursively since it's only a few retries
send_command_outer(al, &wr_cmd.cmd, client, sleeper_tx, res_c,
command_queue, attempts, commands_channel)
}
} | conditional_block |
Workload.js | i = 0; i < data.length; i++) {
var dd = data[i];
var month = new Date(dd.outTime).getMonth() + 1;
if (load[month] == undefined) {
load[month] = 1;
} else {
load[month]++;
}
}
} else if (type == 1) {//月
//统计一年中每个月的工作量
for (i = 0; i < data.length; i++) {
var dd = data[i];
var day = new Date(dd.outTime).getDate();
if (load[day] == undefined) {
load[day] = 1;
} else {
load[day]++;
}
}
} else if (type == 2) {//日
for (i = 0; i < data.length; i++) {
var dd = data[i];
var hour = new Date(dd.outTime).getHours();
if (load[hour] == undefined) {
load[hour] = 1;
} else {
load[hour]++;
}
}
} else if (type == 3) {
for (i = 0; i < data.length; i++) {
var dd = data[i];
var day = new Date(dd.outTime).getDate();
if (load[day] == undefined) {
load[day] = 1;
} else {
load[day]++;
}
}
}
//把空值设为0
if (type == 3) {
for (i = 0; i <= parseInt(Workload.day) + parseInt(Workload.days); i++) {
if (load[i] == undefined) {
load[i] = 0;
}
}
} else {
for (i = 0; i <= Workload.rectNum; i++) {
if (load[i] == undefined) {
load[i] = 0;
}
}
}
Workload.Data = load;
Workload.type = type;
draw(canvas);
}
//type,0,代表年,1代表月,2代表日
//开始绘画
function draw(canvas) {
var context = canvas.getContext("2d");
context.fillStyle = "#ffffff";
context.fillRect(0, 0, Workload.width(), Workload.height());
context.font = "10px Georgia";
lineXY(context);
drawCloseButton(context);
Workload.init();
myClearInterval();
drawPic(context);
}
function myClearInterval() {
//清除所有动画
for (var i = 0; i < time.length; i++) {
clearInterval(time[i]);
}
time.length = 0;
}
function drawCloseButton(context) {
var image = new Image();
image.src = "../images/index/close.png";
var imagex = Workload.width() - 37;
var imagey = 5;
if (image.complete) {
context.drawImage(image, imagex, imagey);
} else {
image.onload = function () {
context.drawImage(image, imagex, imagey);
}
}
}
//循环每一个矩形
function drawPic(context) {
for (var i = 0; i <= Workload.rectNum; i++) {
if (i == 0) {
continue;
}
aniH(context, i, 0);
}
open();
}
//动画实现
//-----bug---动画进行中切换数据会出错
var time = [];
function aniH(context, i, hh) {
var geadd = Workload.limitY / 2500;
var getime = 10;
var signTime = setInterval(function () {
//通过i-1,把i==0,占用的空间去掉
createRect(context, Workload.getX(Workload.unitLengthX * ((i - 1) * 2 + 1)), Workload.unitLengthX, hh += getime * geadd, i);
if (hh >= (Workload.type == 3 ? Workload.Data[Workload.day + i - 1] : Workload.Data[i])) {
window.clearInterval(signTime);
createRect(context, Workload.getX(Workload.unitLengthX * ((i - 1) * 2 + 1)), Workload.unitLengthX, hh += getime * geadd, i, true);
}
}, 10);
time.push(signTime);
}
//画出xy轴
function lineXY(context) {
//画x,y轴
context.beginPath();
Workload.drawLine(context, Workload.getX(0), Workload.getY(0), Workload.getX(0), Workload.getY(Workload.getYLength()));
Workload.drawLine(context, Workload.getX(0), Workload.getY(Workload.getYLength()), Workload.getX(0) - Workload.arrowLength, Workload.getY(Workload.getYLength()) + Workload.arrowLength);
Workload.drawLine(context, Workload.getX(0), Workload.getY(Workload.getYLength()), Workload.getX(0) + Workload.arrowLength, Workload.getY(Workload.getYLength()) + Workload.arrowLength);
context.fillText("工作量", Workload.getX(0), Workload.getY(Workload.getYLength()) - 5);
context.stroke();
Workload.drawLine(context, Workload.getX(0), Workload.getY(0), Workload.getX(Workload.getXLength()), Workload.getY(0));
Workload.drawLine(context, Workload.getX(Workload.getXLength()), Workload.getY(0), Workload.getX(Workload.getXLength()) - Workload.arrowLength, Workload.getY(0) - Workload.arrowLength);
Workload.drawLine(context, Workload.getX(Workload.getXLength()), Workload.getY(0), Workload.getX(Workload.getXLength()) - Workload.arrowLength, Workload.getY(0) + Workload.arrowLength);
context.fillText("时间", Workload.getX(Workload.getXLength()) - 20, Workload.getY(0) - 20);
context.stroke();
//画xy轴上的字
//y轴
context.fillText(Workload.limitY + "", Workload.getX(-20), Workload.getY(Workload.getRectMaxH()));
context.fillText("啦啦啦", 20, 80);
context.stroke();
}
//创建矩形类,同时添加鼠标响应事件,同时把事件响应注册到数组里
function createRect(context, x, width, height, whichDay, isAddToEvent) {
//每创建一个矩形就把它画出来
var y = height * Workload.unitLengthY();
if (isAddToEvent) {
var o = new Object();
o.x = x;
o.width = width;
o.height = height;
o.whichDay = whichDay;
o.y = Workload.getY(0) - y;
o.mouseMove = function (e) {
switch (Workload.type) {
case 0:
toast.innerHTML = Workload.year + "-" + this.whichDay + " " + Workload.Data[this.whichDay] + "件";
break;
case 1:
toast.innerHTML = Workload.year + "-" + Workload.month + "-" + this.whichDay + " " + Workload.Data[this.whichDay] + "件";
break;
case 2:
toast.innerHTML = this.whichDay + "点" + Workload.Data[this.whichDay] + "件";
break;
case 3:
toast.innerHTML = this.whichDay - 1 + Workload.day + "天" + Workload.Data[this.whichDay - 1 + Workload.day] + "件";
break;
}
toast.style.display = "block";
toast.style.left = e.x + 20 + "px";
toast.style.top = e.y + 20 + "px";
};
Workload.mouseEvent.push(o);
}
context.fillStyle = "#42b983";
context.fillRect(x, Workload.getY(0) - y, width, y);
}
//添加事件到DOM上面
canvas.addEventListener("mousemove", function (e) {
var onRect = false;
//循环判断在哪个矩形上面
for (var i = 0; i < Workload.mouseEvent.length; i++) {
var x = e.clientX - e.target.offsetLeft;
var y = e.clientY - e.target.offsetTop;
var oo = Workload.mouseEvent[i];
if (x >= oo.x && x <= oo.x + oo.width && y >= oo.y && y <= Workload.getY(0)) {
oo.mouseMove(e);
onRect = true;
}
}
//如果鼠标不在任何一个矩形上面,那么让显示块消失
if (!onRect) {
toast.innerHTML = "";
toast.style.display = "none";
}
});
canvas.addEventListener("click", function (e) {
//如果点击范围在关闭按钮上,那么关闭
if (e.clientX - e.target.offsetLeft >= Workload.width() - 37 && e.clientY - e.target.offsetTop <= 37) {
close();
}
});
function close() {
$(canvas).attr("class", "workload_canvas");
ReactDOM.render(React.createElement(EmptyComponent, null),
document.getElementById("workload_input_container"))
}
function open() {
$(canvas).attr("class", "workload_canvas_on workload_canvas");
} | identifier_name |
||
Workload.js | setXLimit: function () {
//this.rectNum = this.Data.length;
switch (this.type) {
case 0:
Workload.unitLengthX = (Workload.getXLength() - 40) / 24;
break;
case 1:
Workload.unitLengthX = (Workload.getXLength() - 40) / 62;
break;
case 2:
Workload.unitLengthX = (Workload.getXLength() - 40) / 48;
break;
case 3:
if (this.rectNum != undefined) {
Workload.unitLengthX = (Workload.getXLength() - 40) / (2 * this.rectNum);
}
break;
}
},
init: function () {
this.mouseEvent.length = 0;
this.setXLimit();
this.type = parseInt(this.type);
this.year = parseInt(this.year);
this.month = parseInt(this.month);
this.day = parseInt(this.day);
this.days = parseInt(this.days);
this.rectNum = parseInt(this.rectNum);
var dd = this.Data;
this.limitY = Math.max.apply(null, dd);
}
};
/**
* 工作量容器
*/
var WorkloadInputComponent = React.createClass({displayName: "WorkloadInputComponent",
getInitialState: function () {
return {
year: "2010",
month: "1",
day: "1",
toDay: "",
}
},
yearChange: function (e) {
var value = e.target.value;
this.setState({year: value});
loadData(0, value);
},
monthChange: function (e) {
var value = e.target.value;
this.setState({month: value});
loadData(1, this.state.year, value);
},
dayChange: function (e) {
var value = e.target.value;
this.setState({day: value});
loadData(2, this.state.year, this.state.month, value);
},
inputBlur: function (e) {
var value = e.target.value;
if (value != "") {
this.setState({toDay: value});
loadData(3, this.state.year, this.state.month, this.state.day, value);
}
},
render: function () {
var month = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
var year = [2010, 2011, 2012, 2013, 2014, 2015, 2016];
var day = [];
for (var i = 1; i <= 30; i++) {
day.push(i);
}
return (
React.createElement("div", null,
React.createElement("select", {onChange: this.yearChange, className: "form-control"},
year.map(function (data, index) {
return React.createElement("option", {key: "option"+index, value: data}, data)
})
),
React.createElement("select", {onChange: this.monthChange, className: "form-control"},
month.map(function (data, index) {
return React.createElement("option", {key: "option"+index, value: data}, data)
})
),
React.createElement("select", {onChange: this.dayChange, className: "form-control"},
day.map(function (data, index) {
return React.createElement("option", {key: "option"+index, value: data}, data)
})
),
React.createElement("input", {onBlur: this.inputBlur, type: "number", className: "form-control"})
)
);
}
});
function loadData(type, year, month, day, toDay) {
//type 1,year,2month,3day,4today
//初始化workload类中的参数
if (month < 10) {
month = "0" + month; | }
if (day < 10) {
day = "0" + day;
}
var fromTime = "";
var days = 0;
switch (type) {
case 0:
Workload.year = year;
fromTime = year + "-01-01";
Workload.days = 365;
Workload.rectNum = 12;
break;
case 1:
Workload.month = month;
fromTime = year + "-" + month + "-01";
Workload.days = 31;
Workload.rectNum = 31;
break;
case 2:
Workload.day = day;
fromTime = year + "-" + month + "-" + day;
Workload.days = 1;
Workload.rectNum = 24;
break;
case 3:
Workload.days = toDay;
Workload.rectNum = toDay;
fromTime = year + "-" + month + "-" + day;
break;
}
var url = "";
if (Workload.whichRequest == 0) {//0是个人工作量
url = "/REST/Domain/getWork/employeeId/" + Workload.id + "/starttime/" + fromTime + "/days/" + Workload.days;
} else if (Workload.whichRequest == 1) {//1是网点工作量
url = "/REST/Domain/getWorkOfOutlets/outletId/" + Workload.id + "/starttime/" + fromTime + "/days/" + Workload.days;
}
Tools.myAjax({
type: "get",
url: url,
success: function (data) {
//通过时间区分工作量
handleData(data, type);
},
error: function (data) {
console.error(data);
showDialog("dialog", "错误", "获取工作量错误fromtime:" + fromTime + "day:" + Workload.days, true);
}
});
}
function handleData(data, type) {
var load = [];
var i = 0;
if (type == 0) {
//统计一年中每个月的工作量
for (i = 0; i < data.length; i++) {
var dd = data[i];
var month = new Date(dd.outTime).getMonth() + 1;
if (load[month] == undefined) {
load[month] = 1;
} else {
load[month]++;
}
}
} else if (type == 1) {//月
//统计一年中每个月的工作量
for (i = 0; i < data.length; i++) {
var dd = data[i];
var day = new Date(dd.outTime).getDate();
if (load[day] == undefined) {
load[day] = 1;
} else {
load[day]++;
}
}
} else if (type == 2) {//日
for (i = 0; i < data.length; i++) {
var dd = data[i];
var hour = new Date(dd.outTime).getHours();
if (load[hour] == undefined) {
load[hour] = 1;
} else {
load[hour]++;
}
}
} else if (type == 3) {
for (i = 0; i < data.length; i++) {
var dd = data[i];
var day = new Date(dd.outTime).getDate();
if (load[day] == undefined) {
load[day] = 1;
} else {
load[day]++;
}
}
}
//把空值设为0
if (type == 3) {
for (i = 0; i <= parseInt(Workload.day) + parseInt(Workload.days); i++) {
if (load[i] == undefined) {
load[i] = 0;
}
}
} else {
for (i = 0; i <= Workload.rectNum; i++) {
if (load[i] == undefined) {
load[i] = 0;
}
}
}
Workload.Data = load;
Workload.type = type;
draw(canvas);
}
//type,0,代表年,1代表月,2代表日
//开始绘画
function draw(canvas) {
var context = canvas.getContext("2d");
context.fillStyle = "#ffffff";
context.fillRect(0, 0, Workload.width(), Workload.height());
context.font = "10px Georgia";
lineXY(context);
drawCloseButton(context);
Workload.init();
myClearInterval();
drawPic(context);
}
function myClearInterval() {
//清除所有动画
for (var i = 0; i < time.length; i++) {
clearInterval(time[i]);
}
time.length = 0;
}
function drawCloseButton(context) {
var image = new Image();
image.src = "../images/index/close.png";
var imagex = Workload.width() - 37;
var imagey = 5;
if (image.complete) {
context.drawImage(image, imagex, imagey);
} else {
image.onload = function () {
context.drawImage(image, imagex, imagey);
| random_line_split |
|
Workload.js | setXLimit: function () {
//this.rectNum = this.Data.length;
switch (this.type) {
case 0:
Workload.unitLengthX = (Workload.getXLength() - 40) / 24;
break;
case 1:
Workload.unitLengthX = (Workload.getXLength() - 40) / 62;
break;
case 2:
Workload.unitLengthX = (Workload.getXLength() - 40) / 48;
break;
case 3:
if (this.rectNum != undefined) {
Workload.unitLengthX = (Workload.getXLength() - 40) / (2 * this.rectNum);
}
break;
}
},
init: function () {
this.mouseEvent.length = 0;
this.setXLimit();
this.type = parseInt(this.type);
this.year = parseInt(this.year);
this.month = parseInt(this.month);
this.day = parseInt(this.day);
this.days = parseInt(this.days);
this.rectNum = parseInt(this.rectNum);
var dd = this.Data;
this.limitY = Math.max.apply(null, dd);
}
};
/**
* 工作量容器
*/
var WorkloadInputComponent = React.createClass({displayName: "WorkloadInputComponent",
getInitialState: function () {
return {
year: "2010",
month: "1",
day: "1",
toDay: "",
}
},
yearChange: function (e) {
var value = e.target.value;
this.setState({year: value});
loadData(0, value);
},
monthChange: function (e) {
var value = e.target.value;
this.setState({month: value});
loadData(1, this.state.year, value);
},
dayChange: function (e) {
var value = e.target.value;
this.setState({day: value});
loadData(2, this.state.year, this.state.month, value);
},
inputBlur: function (e) {
var value = e.target.value;
if (value != "") {
this.setState({toDay: value});
loadData(3, this.state.year, this.state.month, this.state.day, value);
}
},
render: function () {
var month = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
var year = [2010, 2011, 2012, 2013, 2014, 2015, 2016];
var day = [];
for (var i = 1; i <= 30; i++) {
day.push(i);
}
return (
React.createElement("div", null,
React.createElement("select", {onChange: this.yearChange, className: "form-control"},
year.map(function (data, index) {
return React.createElement("option", {key: "option"+index, value: data}, data)
})
),
React.createElement("select", {onChange: this.monthChange, className: "form-control"},
month.map(function (data, index) {
return React.createElement("option", {key: "option"+index, value: data}, data)
})
),
React.createElement("select", {onChange: this.dayChange, className: "form-control"},
day.map(function (data, index) {
return React.createElement("option", {key: "option"+index, value: data}, data)
})
),
React.createElement("input", {onBlur: this.inputBlur, type: "number", className: "form-control"})
)
);
}
});
function loadData(type, year, month, day, toDay) {
//type 1,year,2month,3day,4today
//初始化workload类中的参数
if (month < 10) {
month = "0" + month;
}
if (day < 10) {
day = "0" + day;
}
var fromTime = "";
var days = 0;
switch (type) {
case 0:
Workload.year = year;
fromTime = year + "-01-01";
Workload.days = 365;
Workload.rectNum = 12;
break;
case 1:
Workload.month = month;
fromTime = year + "-" + month + "-01";
Workload.days = 31;
Workload.rectNum = 31;
break;
case 2:
Workload.day = day;
fromTime = year + "-" + month + "-" + day;
Workload.days = 1;
Workload.rectNum = 24;
break;
case 3:
Workload.days = toDay;
Workload.rectNum = toDay;
fromTime = year + "-" + month + "-" + day;
break;
}
var url = "";
if (Workload.whichRequest == 0) {//0是个人工作量
url = "/REST/Domain/getWork/employeeId/" + Workload.id + "/starttime/" + fromTime + "/days/" + Workload.days;
} else if (Workload.whichRequest == 1) {//1是网点工作量
url = "/REST/Domain/getWorkOfOutlets/outletId/" + Workload.id + "/starttime/" + fromTime + "/days/" + Workload.days;
}
Tools.myAjax({
type: "get",
url: url,
success: function (data) {
//通过时间区分工作量
handleData(data, type);
},
error: function (data) {
console.error(data);
showDialog("dialog", "错误", "获取工作量错误fromtime:" + fromTime + "day:" + Workload.days, true);
}
});
}
function handleData(data, type) {
var load = [];
var i = 0;
if (type == 0) {
//统计一年中每个月的工作量
for (i = 0; i < data.length; i++) {
var dd = data[i];
var month = new Date(dd.outTime).getMonth() + 1;
if (load[month] == undefined) {
load[month] = 1;
} else {
| } else {
load[hour]++;
}
}
} else if (type == 3) {
for (i = 0; i < data.length; i++) {
var dd = data[i];
var day = new Date(dd.outTime).getDate();
if (load[day] == undefined) {
load[day] = 1;
} else {
load[day]++;
}
}
}
//把空值设为0
if (type == 3) {
for (i = 0; i <= parseInt(Workload.day) + parseInt(Workload.days); i++) {
if (load[i] == undefined) {
load[i] = 0;
}
}
} else {
for (i = 0; i <= Workload.rectNum; i++) {
if (load[i] == undefined) {
load[i] = 0;
}
}
}
Workload.Data = load;
Workload.type = type;
draw(canvas);
}
//type,0,代表年,1代表月,2代表日
//开始绘画
function draw(canvas) {
var context = canvas.getContext("2d");
context.fillStyle = "#ffffff";
context.fillRect(0, 0, Workload.width(), Workload.height());
context.font = "10px Georgia";
lineXY(context);
drawCloseButton(context);
Workload.init();
myClearInterval();
drawPic(context);
}
function myClearInterval
() {
//清除所有动画
for (var i = 0; i < time.length; i++) {
clearInterval(time[i]);
}
time.length = 0;
}
function drawCloseButton(context) {
var image = new Image();
image.src = "../images/index/close.png";
var imagex = Workload.width() - 37;
var imagey = 5;
if (image.complete) {
context.drawImage(image, imagex, imagey);
} else {
image.onload = function () {
context.drawImage(image, imagex, image | load[month]++;
}
}
} else if (type == 1) {//月
//统计一年中每个月的工作量
for (i = 0; i < data.length; i++) {
var dd = data[i];
var day = new Date(dd.outTime).getDate();
if (load[day] == undefined) {
load[day] = 1;
} else {
load[day]++;
}
}
} else if (type == 2) {//日
for (i = 0; i < data.length; i++) {
var dd = data[i];
var hour = new Date(dd.outTime).getHours();
if (load[hour] == undefined) {
load[hour] = 1; | identifier_body |
runningView.js | + condition.qualifiedTime
numId = STAR_FLAG + condition. +'-num',
imageId = STAR_FLAG + name+'-image',
numNodes = nodes[numId],
imageNodes = nodes[imageId];*/
},
allRight: function(userData, condition){
//numNodes.textContent = userData[name] ? '全部答对' : '未全部答对';
//imageNodes.style.bottom =
// (userData[name] ? 100 : 0) + '%';
}
},
conditionMethods: {
//starTpl: _.template(lib.content('tpl-star')),
right: function(value){
var data = {
title: '答对',
intro: '答对' + value + '题'
}
return data;
//return lib.str2dom(this.starTpl(data));
},
continueRight: function(value){
var data = {
title: '连对'+value+'题'
}
return data;
//return lib.str2dom(this.starTpl(data));
},
allRight: function(data){
var data = {
title: '全部答对'
}
return data;
//return lib.str2dom(this.starTpl(data));
},
timeout: function(value){
var data = {
title: '超时',
intro: '超时少于' + value + '题'
}
return data;
//return lib.str2dom(this.starTpl(data));
},
quickAnswer: function(value, condition){
var qualifiedTime = condition.qualifiedTime,
title = qualifiedTime <=3 ? '秒答' : qualifiedTime + '秒答对'
data = {
title: title,
intro: title +'题数达到'+value
}
return data;
//return lib.str2dom(this.starTpl(data));
}
},
nodes: {
submit: lib.g('game-submit'),
reset: lib.g('reset-topic'),
back: lib.g('back-choice'),
main: lib.g('to-main'),
details: lib.g('game-details'),
options: lib.g('game-options'),
answerTip: lib.g('game-tip'),
panel: lib.g('game-scene'),
title: lib.g('game-title'),
nextTopic: lib.g('game-next'),
majorPass: lib.g('major-pass'),
starPass: lib.g('star-pass'),
countdownStep: lib.g('countdown-step')
},
render: function(dom, data){
if(data.hide!==undefined){
lib.hide(dom);
}
if(data.text!==undefined){
dom.textContent = data.text;
}
if(data.html!==undefined){
dom.innerHTML = data.html;
}
},
extendCondition: function(conditions){
var methods = this.conditionMethods;
if(conditions.major){
conditions.major.forEach(function(condition){
var renderData = | var renderData = methods[condition.name](condition.value, condition);
lib.extend(condition, renderData);
});
}
},
scene: {},
tipObj: {},
tipPanel: lib.g('window-tip')
}
publicMethods = {
hasInitEvents: false,
initSet: function(id, ev, events, context){
privateMethods.scene[id] = {
ev: ev,
context: context
};
//lib
},
changeTo: function(id){
var nodes = privateMethods.nodes,
scene = privateMethods.scene[id],
events = scene.ev,
context = scene.context;
Object.keys(events).forEach(function(ev){
var data = events[ev],
node = nodes[ev];
node.onclick = data.click ? function(){
data.click.call(context);
} : null;
// 默认显示
lib[ data.hide ? 'hide' : 'show' ](node);
});
},
render: function(data){
var nodes = privateMethods.nodes,
render = privateMethods.render;
Object.keys(data).forEach(function(name){
render(nodes[name], data[name]);
});
},
renderRemindItems: function(userData, conditions){
var vm = this.remindVm;
conditions.forEach(function(condition){
vm[condition](userData[condition]);
});
},
renderCondition: function(userData, conditions){
var vm = this.starViewModel,
map = this.starMap,
commonViewModel = this.commonViewModel,
newValue;
Object.keys(userData).forEach(function(key){
var indexData = map[key];
newValue = userData[key]
if(indexData){
var typeData = vm[indexData.type]();
typeData[indexData.index].userValue(newValue);
}
if(commonViewModel[key]){
commonViewModel[key](newValue);
}
});
return;
},
/**
* 展现各种浮层弹窗
* @param {string} id 浮层的id
* @param {object} data 浮层的渲染数据
* @param {object} events model层提供的一些供view使用的方法
* @return {object} 浮层对象
*/
showTip: function(id, data, events){
var tipObj = privateMethods.tipObj[id],
panel = privateMethods.tipPanel,
detail,
tpl,
toolVm;
if(!tipObj){
tipObj = privateMethods.tipObj[id] = {};
tipObj.init = true;
tipObj.tpl = _.template(lib.content(id+'-tpl'));
tipObj.detail = lib.g(id+'-detail');
tipObj.toolVm = {
hasPass: ko.observable(data.hasPass)
}
lib.g(id+'-tool').onclick = function(e){
var target = e.target,
clickEvent;
if(target&&(clickEvent=lib.attr(target, 'pt-click'))){
events[clickEvent]&&events[clickEvent].call(events);
}
}
ko.applyBindings(tipObj.toolVm, lib.g(id+'-tool'));
}
tpl = tipObj.tpl;
detail = tipObj.detail;
privateMethods.extendCondition(data)
detail.innerHTML = tpl(data);
if(privateMethods.tipOldId){
lib.removeClass(panel, privateMethods.tipOldId);
}
lib.addClass(panel, id);
privateMethods.tipOldId = id;
lib.show(panel);
toolVm = tipObj.toolVm;
Object.keys(data).forEach(function(d){
if(d in toolVm){
toolVm[d](data[d]);
}
});
return tipObj;
},
hideTip: function(){
lib.hide(privateMethods.tipPanel);
},
starViewModel: null,
commonViewModel: null,
starMap: {},
initPassConditon: function(data, userData){
var conditionData = {},
map = this.starMap = {},
methods = privateMethods.conditionMethods;
for(var key in data){
if(data.hasOwnProperty(key)){
var cd = conditionData[key] = [];
data[key].forEach(function(condition, index){
var renderData = methods[condition.name](condition.value, condition);
lib.extend(renderData, condition);
renderData.userValue =
ko.observable(userData[renderData.id]);
renderData.percent = ko.computed(function() {
var per= this.userValue()/this.value;
return -(100-Math.floor((per>1?1:per)*100))+"%" ;
}, renderData);
cd.push(renderData);
map[renderData.id] = {
type: key,
index: index
}
});
}
}
lib[ data.star ? 'show' : 'hide' ](privateMethods.nodes.starPass);
if(!this.starViewModel){
this.starViewModel = {
major: ko.observableArray(conditionData.major),
star: ko.observableArray(conditionData.star)
}
ko.applyBindings(this.starViewModel, lib.g('star-panel'));
var commonViewModel = this.commonViewModel = {};
for(var key in userData){
if(userData.hasOwnProperty(key)){
commonViewModel[key] = ko.observable(userData[key]);
}
}
ko.applyBindings(commonViewModel, lib.g('game-command'));
}else{
var vmMajor = this.starViewModel.major,
vmStar = this.starViewModel.star,
len = Number.MAX_VALUE;
vmMajor.splice.apply(vmMajor, [0,len].concat(conditionData.major));
vmStar.splice.apply(vmStar, [0,len].concat(conditionData.star));
}
},
initOverConditon: function(){
},
remindVm: null,
initRemindItems: function(items, userData){
var vm = this.remindVm;
if(!vm){
vm = this.remindVm = {
right: ko.observable(undefined),
continueRight: ko.observable(undefined)
}
ko.applyBindings(vm, lib.g('remind | methods[condition.name](condition.value, condition);
lib.extend(condition, renderData);
});
}
if(conditions.star){
conditions.star.forEach(function(condition){
| conditional_block |
runningView.js | ' + condition.qualifiedTime
numId = STAR_FLAG + condition. +'-num',
imageId = STAR_FLAG + name+'-image',
numNodes = nodes[numId],
imageNodes = nodes[imageId];*/
},
allRight: function(userData, condition){
//numNodes.textContent = userData[name] ? '全部答对' : '未全部答对';
//imageNodes.style.bottom =
// (userData[name] ? 100 : 0) + '%';
}
},
conditionMethods: {
//starTpl: _.template(lib.content('tpl-star')),
right: function(value){
var data = {
title: '答对',
intro: '答对' + value + '题'
}
return data;
//return lib.str2dom(this.starTpl(data));
},
continueRight: function(value){
var data = {
title: '连对'+value+'题'
}
return data;
//return lib.str2dom(this.starTpl(data));
},
allRight: function(data){
var data = {
title: '全部答对'
}
return data;
//return lib.str2dom(this.starTpl(data));
},
timeout: function(value){
var data = {
title: '超时',
intro: '超时少于' + value + '题'
}
return data;
//return lib.str2dom(this.starTpl(data));
},
quickAnswer: function(value, condition){
var qualifiedTime = condition.qualifiedTime,
title = qualifiedTime <=3 ? '秒答' : qualifiedTime + '秒答对'
data = {
title: title,
intro: title +'题数达到'+value
}
return data;
//return lib.str2dom(this.starTpl(data));
}
},
nodes: {
submit: lib.g('game-submit'),
reset: lib.g('reset-topic'),
back: lib.g('back-choice'),
main: lib.g('to-main'),
details: lib.g('game-details'),
options: lib.g('game-options'),
answerTip: lib.g('game-tip'),
panel: lib.g('game-scene'),
title: lib.g('game-title'),
nextTopic: lib.g('game-next'),
majorPass: lib.g('major-pass'),
starPass: lib.g('star-pass'),
countdownStep: lib.g('countdown-step')
},
render: function(dom, data){
if(data.hide!==undefined){
lib.hide(dom);
}
if(data.text!==undefined){
dom.textContent = data.text;
}
if(data.html!==undefined){
dom.innerHTML = data.html;
}
},
extendCondition: function(conditions){
var methods = this.conditionMethods;
if(conditions.major){
conditions.major.forEach(function(condition){
var renderData = methods[condition.name](condition.value, condition);
lib.extend(condition, renderData);
});
}
if(conditions.star){
conditions.star.forEach(function(condition){
var renderData = methods[condition.name](condition.value, condition);
lib.extend(condition, renderData);
});
}
},
scene: {},
tipObj: {},
tipPanel: lib.g('window-tip')
}
publicMethods = {
hasInitEvents: false,
initSet: function(id, ev, events, context){
privateMethods.scene[id] = {
ev: ev,
context: context | changeTo: function(id){
var nodes = privateMethods.nodes,
scene = privateMethods.scene[id],
events = scene.ev,
context = scene.context;
Object.keys(events).forEach(function(ev){
var data = events[ev],
node = nodes[ev];
node.onclick = data.click ? function(){
data.click.call(context);
} : null;
// 默认显示
lib[ data.hide ? 'hide' : 'show' ](node);
});
},
render: function(data){
var nodes = privateMethods.nodes,
render = privateMethods.render;
Object.keys(data).forEach(function(name){
render(nodes[name], data[name]);
});
},
renderRemindItems: function(userData, conditions){
var vm = this.remindVm;
conditions.forEach(function(condition){
vm[condition](userData[condition]);
});
},
renderCondition: function(userData, conditions){
var vm = this.starViewModel,
map = this.starMap,
commonViewModel = this.commonViewModel,
newValue;
Object.keys(userData).forEach(function(key){
var indexData = map[key];
newValue = userData[key]
if(indexData){
var typeData = vm[indexData.type]();
typeData[indexData.index].userValue(newValue);
}
if(commonViewModel[key]){
commonViewModel[key](newValue);
}
});
return;
},
/**
* 展现各种浮层弹窗
* @param {string} id 浮层的id
* @param {object} data 浮层的渲染数据
* @param {object} events model层提供的一些供view使用的方法
* @return {object} 浮层对象
*/
showTip: function(id, data, events){
var tipObj = privateMethods.tipObj[id],
panel = privateMethods.tipPanel,
detail,
tpl,
toolVm;
if(!tipObj){
tipObj = privateMethods.tipObj[id] = {};
tipObj.init = true;
tipObj.tpl = _.template(lib.content(id+'-tpl'));
tipObj.detail = lib.g(id+'-detail');
tipObj.toolVm = {
hasPass: ko.observable(data.hasPass)
}
lib.g(id+'-tool').onclick = function(e){
var target = e.target,
clickEvent;
if(target&&(clickEvent=lib.attr(target, 'pt-click'))){
events[clickEvent]&&events[clickEvent].call(events);
}
}
ko.applyBindings(tipObj.toolVm, lib.g(id+'-tool'));
}
tpl = tipObj.tpl;
detail = tipObj.detail;
privateMethods.extendCondition(data)
detail.innerHTML = tpl(data);
if(privateMethods.tipOldId){
lib.removeClass(panel, privateMethods.tipOldId);
}
lib.addClass(panel, id);
privateMethods.tipOldId = id;
lib.show(panel);
toolVm = tipObj.toolVm;
Object.keys(data).forEach(function(d){
if(d in toolVm){
toolVm[d](data[d]);
}
});
return tipObj;
},
hideTip: function(){
lib.hide(privateMethods.tipPanel);
},
starViewModel: null,
commonViewModel: null,
starMap: {},
initPassConditon: function(data, userData){
var conditionData = {},
map = this.starMap = {},
methods = privateMethods.conditionMethods;
for(var key in data){
if(data.hasOwnProperty(key)){
var cd = conditionData[key] = [];
data[key].forEach(function(condition, index){
var renderData = methods[condition.name](condition.value, condition);
lib.extend(renderData, condition);
renderData.userValue =
ko.observable(userData[renderData.id]);
renderData.percent = ko.computed(function() {
var per= this.userValue()/this.value;
return -(100-Math.floor((per>1?1:per)*100))+"%" ;
}, renderData);
cd.push(renderData);
map[renderData.id] = {
type: key,
index: index
}
});
}
}
lib[ data.star ? 'show' : 'hide' ](privateMethods.nodes.starPass);
if(!this.starViewModel){
this.starViewModel = {
major: ko.observableArray(conditionData.major),
star: ko.observableArray(conditionData.star)
}
ko.applyBindings(this.starViewModel, lib.g('star-panel'));
var commonViewModel = this.commonViewModel = {};
for(var key in userData){
if(userData.hasOwnProperty(key)){
commonViewModel[key] = ko.observable(userData[key]);
}
}
ko.applyBindings(commonViewModel, lib.g('game-command'));
}else{
var vmMajor = this.starViewModel.major,
vmStar = this.starViewModel.star,
len = Number.MAX_VALUE;
vmMajor.splice.apply(vmMajor, [0,len].concat(conditionData.major));
vmStar.splice.apply(vmStar, [0,len].concat(conditionData.star));
}
},
initOverConditon: function(){
},
remindVm: null,
initRemindItems: function(items, userData){
var vm = this.remindVm;
if(!vm){
vm = this.remindVm = {
right: ko.observable(undefined),
continueRight: ko.observable(undefined)
}
ko.applyBindings(vm, lib.g('remind-items | };
//lib
}, | random_line_split |
p6.py | samples[:,1], s=0.2)
plt.show()
bins= 20
(counts, x_bins, y_bins) = np.histogram2d(samples[:, 0], samples[:, 1], bins=bins)
plt.contourf(counts, extent=[x_bins[0], x_bins[-1], y_bins[0], y_bins[-1]])
plt.show()
samples_for_f = samples
# #### c. Consider a normal distribution specified by the following parameters:
# $ n = 2, N = 500, M = \begin{bmatrix}
# m_1 \\
# m_2
# \end{bmatrix}, \Sigma = \begin{bmatrix}
# \sigma_{11} & \sigma_{12} \\
# \sigma_{21} & \sigma_{22}
# \end{bmatrix}$ <br>
# Determine appropriate values for each of the unknown variables, so that the shape of the
# distribution becomes: <br>
# > c.1) A circle in the upper left of the Euclidean coordinate system. <br>
# > c.2) A diagonal line (/ shape) in the centre<br>
# > c.3) A horizontal ellipsoid in the lower right of the Euclidean coordinate system<br>
#
#
# Display the generated samples. <br>
# c.1) A circle in the upper left of the Euclidean coordinate system:
# $$ m_1 < 0 , m_2 > 0, \Sigma = I $$
# In[4]:
def | ():
plt.xlim([-10, 10])
plt.ylim([-10, 10])
ax = plt.gca()
ax.spines['top'].set_color('none')
ax.spines['bottom'].set_position('zero')
ax.spines['left'].set_position('zero')
ax.spines['right'].set_color('none')
# In[5]:
N =5000 # we changed N, because N=500 was too small for being visualized well
samples = np.random.multivariate_normal(np.array([-5,5]), np.array([[1,0],[0,1]]), N)
setting_function()
plt.scatter (samples[:,0], samples[:,1], s=0.2)
plt.show()
# c.2) A diagonal line (/ shape) in the centre<br>
# We have to select M to be $(0,0)$ for being in the center. <br>
# to make the distribution like a line /, we will choose $Sigma$ so that its biggest eigenvector of $\Sigma$ points to the / direction (parallel to $vector=(1,1))$ and its second eigen vector points to $(-1, 1)$. <br>
# To make the distribution similar to a diogional line, we will choose $\lambda_1=10, \lambda_2=1 $
#
# In[6]:
N=10000
setting_function()
samples = np.random.multivariate_normal(np.array([0,0]), np.array([[11/2,9/2],[9/2,11/2]]), N)
plt.scatter (samples[:,0], samples[:,1], s=0.2)
plt.show()
# c.3) A horizontal ellipsoid in the lower right of the Euclidean coordinate system
#
# In[7]:
N=10000
setting_function()
samples = np.random.multivariate_normal(np.array([5,-5]), np.array([[2.5, 0],[0,1]]), N)
plt.scatter (samples[:,0], samples[:,1], s=0.2)
plt.show()
# #### d) Consider a random variable with
# $ n = 2, N = 500, M = \begin{bmatrix}
# 2 \\
# 3
# \end{bmatrix}, \Sigma = \begin{bmatrix}
# 1 & 2\rho \\
# 2 \rho & 4
# \end{bmatrix}$ <br>
# #### compute $d^2(x)$ analytically, if the parameters are:
# $$ \rho = \{-0.99, -0.5, 0.5, 0.99\} $$
# If $ \Sigma^{-1} = \begin{bmatrix}
# a & b \\
# c & d
# \end{bmatrix}$ <br> then:
# $ d^2(x)= (x-m)^T \Sigma^{-1} (x-m) = ax_1^2 + (-4a -3(c+d))x_1 + (-6d -2(c+d))x_2 + (c+d)x_1x_2 + dx_2^2 $ <br>
# for $\rho =-0.99$:
# $$ d^2(x)= 50x_1^2 + (-350)x_1 + (-75)x_2 + 50 x_1x_2 + 12.5x_2^2 $$
# In[8]:
levels=[4,9,16]
plt.xlim(-10,10)
plt.ylim(-10,10)
X,Y = np.mgrid[-10:10.1:0.1, -10:10.1:0.1]
xy = np.vstack((X.flatten(), Y.flatten())).T
Sigmas = []
vals = [-0.99, -0.5, 0.5, 0.99]
for val in vals:
Sigmas.append(np.array([[1,2*val],[2*val,4]]))
mu = np.array([2,3]) #np.array([0,0])# np.array([2,3])
def d_squared (xy, sigma, mu):
matrix = inv(sigma)
a = xy - mu
t = np.matmul(a,matrix)
return np.matmul(t, a.T)
for sigma in Sigmas:
print ("Sigma is")
print (sigma)
print ("Formula")
t= inv(sigma)
a = t[0][0]
b = t[0][1]
c = t[1][0]
d = t[1][1]
s = str(a) + "x^2 + " + str(-4*a-3*(c+d)) + "x +" + str(-6*d-2*(c+d)) + "y + " + str(c+d) + "xy + " + str(d) + "y^2 = 0"
print (s)
Z = np.apply_along_axis(func1d = d_squared, axis= 1, arr = xy, sigma=sigma, mu = mu)
Z = np.reshape(Z, (len(X), -1))
plt.contour(X,Y,Z, levels=[4,9,16])
plt.show()
# #### f) Calculate the sample mean $ \hat{M} $, and sample covariance matrix $\hat{\Sigma}$ of the distribution in part b., and comment on the results.
# In[9]:
#Just repeating part b:
N =5000
samples = np.random.multivariate_normal(np.array([2,1]), np.array([[2,1],[1,3]]), N)
plt.xlim([-7.5, 13.5])
plt.ylim([-10, 10])
plt.gca().set_aspect('equal')
plt.scatter (samples[:,0], samples[:,1], s=0.2)
plt.show()
bins= 10
plt.xlim([-7.5, 13.5])
plt.ylim([-10, 10])
plt.gca().set_aspect('equal')
(counts, x_bins, y_bins) = np.histogram2d(samples[:, 0], samples[:, 1], bins=bins)
plt.contourf(counts, extent=[x_bins[0], x_bins[-1], y_bins[0], y_bins[-1]])
plt.show()
# In[10]:
estimated_mean = np.mean(samples, axis=0)
print ("estimated mean", estimated_mean)
print ("real mean [2,1]")
estimated_var = 0
for i in range(0, len(samples)):
vec = np.array([samples[i]-estimated_mean])
tmp = np.matmul(vec.T, vec)
estimated_var += tmp
estimated_var /= (len(samples)-1)
print ("estimated sigma")
print (estimated_var)
print ("real sigma:")
print (np.array([[2,1],[1,3]]) )
# Comment: The estimated mean and sigma are close to real mean and real sigma. <br>
# Since these estimations are consistent and not biased, the estimation will become close to the real value as the number of samples become large enough.
# #### g) Simultaneously diagonalise $\sigma$ and $\hat{\sigma}$ , and form a vector $ V = [\lambda_1, \lambda_2]^T $
#
# In[11]:
real_sigma = np.array([[2,1],[1,3]])
# First, whiten the estimated sigma:
estimated_sigma = estimated_var
w, v = linalg.eig(estimated_sigma)
normalizer = np.diag(np.sqrt(1/w))
first_transformation = np.matmul(v, normalizer)
print ("first transformation")
print (first_transformation)
| setting_function | identifier_name |
p6.py |
#
# for i in range(0, len(sigmas)):
# s = np.random.normal(mu, sigmas[i], N)
# print ("mu", mu, "Sigma", sigmas[i])
# plt.scatter (s, np.zeros(len(s)), s=0.1, color=colors[i])
# plt.xlim([485, 515])
# plt.show()
# #### b) Generate samples from a normal distributions specified by the following parameters: <br>
# $ n = 2, N = 500, M = \begin{bmatrix}
# 2 \\
# 1
# \end{bmatrix}, \Sigma = \begin{bmatrix}
# 2 & 1 \\
# 1 & 3
# \end{bmatrix}$ <br>
# Display the samples, as well as the associated contour plot.
# $
# \begin{pmatrix}
# 2 & 3 & 1 \
# 0.5 & 2 & -1 \
# -1 & 5 & -7
# \end{pmatrix}
# $
# In[3]:
N =5000 # we changed N, because N=500 was too small for being visualized well
samples = np.random.multivariate_normal(np.array([2,1]), np.array([[2,1],[1,3]]), N)
plt.xlim([-7.5, 13.5])
plt.ylim([-10, 10])
plt.scatter (samples[:,0], samples[:,1], s=0.2)
plt.show()
bins= 20
(counts, x_bins, y_bins) = np.histogram2d(samples[:, 0], samples[:, 1], bins=bins)
plt.contourf(counts, extent=[x_bins[0], x_bins[-1], y_bins[0], y_bins[-1]])
plt.show()
samples_for_f = samples
# #### c. Consider a normal distribution specified by the following parameters:
# $ n = 2, N = 500, M = \begin{bmatrix}
# m_1 \\
# m_2
# \end{bmatrix}, \Sigma = \begin{bmatrix}
# \sigma_{11} & \sigma_{12} \\
# \sigma_{21} & \sigma_{22}
# \end{bmatrix}$ <br>
# Determine appropriate values for each of the unknown variables, so that the shape of the
# distribution becomes: <br>
# > c.1) A circle in the upper left of the Euclidean coordinate system. <br>
# > c.2) A diagonal line (/ shape) in the centre<br>
# > c.3) A horizontal ellipsoid in the lower right of the Euclidean coordinate system<br>
#
#
# Display the generated samples. <br>
# c.1) A circle in the upper left of the Euclidean coordinate system:
# $$ m_1 < 0 , m_2 > 0, \Sigma = I $$
# In[4]:
def setting_function ():
plt.xlim([-10, 10])
plt.ylim([-10, 10])
ax = plt.gca()
ax.spines['top'].set_color('none')
ax.spines['bottom'].set_position('zero')
ax.spines['left'].set_position('zero')
ax.spines['right'].set_color('none')
# In[5]:
N =5000 # we changed N, because N=500 was too small for being visualized well
samples = np.random.multivariate_normal(np.array([-5,5]), np.array([[1,0],[0,1]]), N)
setting_function()
plt.scatter (samples[:,0], samples[:,1], s=0.2)
plt.show()
# c.2) A diagonal line (/ shape) in the centre<br>
# We have to select M to be $(0,0)$ for being in the center. <br>
# to make the distribution like a line /, we will choose $Sigma$ so that its biggest eigenvector of $\Sigma$ points to the / direction (parallel to $vector=(1,1))$ and its second eigen vector points to $(-1, 1)$. <br>
# To make the distribution similar to a diogional line, we will choose $\lambda_1=10, \lambda_2=1 $
#
# In[6]:
N=10000
setting_function()
samples = np.random.multivariate_normal(np.array([0,0]), np.array([[11/2,9/2],[9/2,11/2]]), N)
plt.scatter (samples[:,0], samples[:,1], s=0.2)
plt.show()
# c.3) A horizontal ellipsoid in the lower right of the Euclidean coordinate system
#
# In[7]:
N=10000
setting_function()
samples = np.random.multivariate_normal(np.array([5,-5]), np.array([[2.5, 0],[0,1]]), N)
plt.scatter (samples[:,0], samples[:,1], s=0.2)
plt.show()
# #### d) Consider a random variable with
# $ n = 2, N = 500, M = \begin{bmatrix}
# 2 \\
# 3
# \end{bmatrix}, \Sigma = \begin{bmatrix}
# 1 & 2\rho \\
# 2 \rho & 4
# \end{bmatrix}$ <br>
# #### compute $d^2(x)$ analytically, if the parameters are:
# $$ \rho = \{-0.99, -0.5, 0.5, 0.99\} $$
# If $ \Sigma^{-1} = \begin{bmatrix}
# a & b \\
# c & d
# \end{bmatrix}$ <br> then:
# $ d^2(x)= (x-m)^T \Sigma^{-1} (x-m) = ax_1^2 + (-4a -3(c+d))x_1 + (-6d -2(c+d))x_2 + (c+d)x_1x_2 + dx_2^2 $ <br>
# for $\rho =-0.99$:
# $$ d^2(x)= 50x_1^2 + (-350)x_1 + (-75)x_2 + 50 x_1x_2 + 12.5x_2^2 $$
# In[8]:
levels=[4,9,16]
plt.xlim(-10,10)
plt.ylim(-10,10)
X,Y = np.mgrid[-10:10.1:0.1, -10:10.1:0.1]
xy = np.vstack((X.flatten(), Y.flatten())).T
Sigmas = []
vals = [-0.99, -0.5, 0.5, 0.99]
for val in vals:
Sigmas.append(np.array([[1,2*val],[2*val,4]]))
mu = np.array([2,3]) #np.array([0,0])# np.array([2,3])
def d_squared (xy, sigma, mu):
matrix = inv(sigma)
a = xy - mu
t = np.matmul(a,matrix)
return np.matmul(t, a.T)
for sigma in Sigmas:
print ("Sigma is")
print (sigma)
print ("Formula")
t= inv(sigma)
a = t[0][0]
b = t[0][1]
c = t[1][0]
d = t[1][1]
s = str(a) + "x^2 + " + str(-4*a-3*(c+d)) + "x +" + str(-6*d-2*(c+d)) + "y + " + str(c+d) + "xy + " + str(d) + "y^2 = 0"
print (s)
Z = np.apply_along_axis(func1d = d_squared, axis= 1, arr = xy, sigma=sigma, mu = mu)
Z = np.reshape(Z, (len(X), -1))
plt.contour(X,Y,Z, levels=[4,9,16])
plt.show()
# #### f) Calculate the sample mean $ \hat{M} $, and sample covariance matrix $\hat{\Sigma}$ of the distribution in part b., and comment on the results.
# In[9]:
#Just repeating part b:
N =5000
samples = np.random.multivariate_normal(np.array([2,1]), np.array([[2,1],[1,3]]), N)
plt.xlim([-7.5, 13.5])
plt.ylim([- | s = np.random.normal(mu, sigmas[i], N)
print ("mu", mu, "Sigma", sigmas[i])
sns.distplot(s, color=colors[i], bins=bins)
plt.scatter (s, np.zeros(len(s)), s=0.2, color=colors[i])
plt.xlim([485, 515])
plt.show() | conditional_block |
|
p6.py | samples[:,1], s=0.2)
plt.show()
bins= 20
(counts, x_bins, y_bins) = np.histogram2d(samples[:, 0], samples[:, 1], bins=bins)
plt.contourf(counts, extent=[x_bins[0], x_bins[-1], y_bins[0], y_bins[-1]])
plt.show()
samples_for_f = samples
# #### c. Consider a normal distribution specified by the following parameters:
# $ n = 2, N = 500, M = \begin{bmatrix}
# m_1 \\
# m_2
# \end{bmatrix}, \Sigma = \begin{bmatrix}
# \sigma_{11} & \sigma_{12} \\
# \sigma_{21} & \sigma_{22}
# \end{bmatrix}$ <br>
# Determine appropriate values for each of the unknown variables, so that the shape of the
# distribution becomes: <br>
# > c.1) A circle in the upper left of the Euclidean coordinate system. <br>
# > c.2) A diagonal line (/ shape) in the centre<br>
# > c.3) A horizontal ellipsoid in the lower right of the Euclidean coordinate system<br>
#
#
# Display the generated samples. <br>
# c.1) A circle in the upper left of the Euclidean coordinate system:
# $$ m_1 < 0 , m_2 > 0, \Sigma = I $$
# In[4]:
def setting_function ():
|
# In[5]:
N =5000 # we changed N, because N=500 was too small for being visualized well
samples = np.random.multivariate_normal(np.array([-5,5]), np.array([[1,0],[0,1]]), N)
setting_function()
plt.scatter (samples[:,0], samples[:,1], s=0.2)
plt.show()
# c.2) A diagonal line (/ shape) in the centre<br>
# We have to select M to be $(0,0)$ for being in the center. <br>
# to make the distribution like a line /, we will choose $Sigma$ so that its biggest eigenvector of $\Sigma$ points to the / direction (parallel to $vector=(1,1))$ and its second eigen vector points to $(-1, 1)$. <br>
# To make the distribution similar to a diogional line, we will choose $\lambda_1=10, \lambda_2=1 $
#
# In[6]:
N=10000
setting_function()
samples = np.random.multivariate_normal(np.array([0,0]), np.array([[11/2,9/2],[9/2,11/2]]), N)
plt.scatter (samples[:,0], samples[:,1], s=0.2)
plt.show()
# c.3) A horizontal ellipsoid in the lower right of the Euclidean coordinate system
#
# In[7]:
N=10000
setting_function()
samples = np.random.multivariate_normal(np.array([5,-5]), np.array([[2.5, 0],[0,1]]), N)
plt.scatter (samples[:,0], samples[:,1], s=0.2)
plt.show()
# #### d) Consider a random variable with
# $ n = 2, N = 500, M = \begin{bmatrix}
# 2 \\
# 3
# \end{bmatrix}, \Sigma = \begin{bmatrix}
# 1 & 2\rho \\
# 2 \rho & 4
# \end{bmatrix}$ <br>
# #### compute $d^2(x)$ analytically, if the parameters are:
# $$ \rho = \{-0.99, -0.5, 0.5, 0.99\} $$
# If $ \Sigma^{-1} = \begin{bmatrix}
# a & b \\
# c & d
# \end{bmatrix}$ <br> then:
# $ d^2(x)= (x-m)^T \Sigma^{-1} (x-m) = ax_1^2 + (-4a -3(c+d))x_1 + (-6d -2(c+d))x_2 + (c+d)x_1x_2 + dx_2^2 $ <br>
# for $\rho =-0.99$:
# $$ d^2(x)= 50x_1^2 + (-350)x_1 + (-75)x_2 + 50 x_1x_2 + 12.5x_2^2 $$
# In[8]:
levels=[4,9,16]
plt.xlim(-10,10)
plt.ylim(-10,10)
X,Y = np.mgrid[-10:10.1:0.1, -10:10.1:0.1]
xy = np.vstack((X.flatten(), Y.flatten())).T
Sigmas = []
vals = [-0.99, -0.5, 0.5, 0.99]
for val in vals:
Sigmas.append(np.array([[1,2*val],[2*val,4]]))
mu = np.array([2,3]) #np.array([0,0])# np.array([2,3])
def d_squared (xy, sigma, mu):
matrix = inv(sigma)
a = xy - mu
t = np.matmul(a,matrix)
return np.matmul(t, a.T)
for sigma in Sigmas:
print ("Sigma is")
print (sigma)
print ("Formula")
t= inv(sigma)
a = t[0][0]
b = t[0][1]
c = t[1][0]
d = t[1][1]
s = str(a) + "x^2 + " + str(-4*a-3*(c+d)) + "x +" + str(-6*d-2*(c+d)) + "y + " + str(c+d) + "xy + " + str(d) + "y^2 = 0"
print (s)
Z = np.apply_along_axis(func1d = d_squared, axis= 1, arr = xy, sigma=sigma, mu = mu)
Z = np.reshape(Z, (len(X), -1))
plt.contour(X,Y,Z, levels=[4,9,16])
plt.show()
# #### f) Calculate the sample mean $ \hat{M} $, and sample covariance matrix $\hat{\Sigma}$ of the distribution in part b., and comment on the results.
# In[9]:
#Just repeating part b:
N =5000
samples = np.random.multivariate_normal(np.array([2,1]), np.array([[2,1],[1,3]]), N)
plt.xlim([-7.5, 13.5])
plt.ylim([-10, 10])
plt.gca().set_aspect('equal')
plt.scatter (samples[:,0], samples[:,1], s=0.2)
plt.show()
bins= 10
plt.xlim([-7.5, 13.5])
plt.ylim([-10, 10])
plt.gca().set_aspect('equal')
(counts, x_bins, y_bins) = np.histogram2d(samples[:, 0], samples[:, 1], bins=bins)
plt.contourf(counts, extent=[x_bins[0], x_bins[-1], y_bins[0], y_bins[-1]])
plt.show()
# In[10]:
estimated_mean = np.mean(samples, axis=0)
print ("estimated mean", estimated_mean)
print ("real mean [2,1]")
estimated_var = 0
for i in range(0, len(samples)):
vec = np.array([samples[i]-estimated_mean])
tmp = np.matmul(vec.T, vec)
estimated_var += tmp
estimated_var /= (len(samples)-1)
print ("estimated sigma")
print (estimated_var)
print ("real sigma:")
print (np.array([[2,1],[1,3]]) )
# Comment: The estimated mean and sigma are close to real mean and real sigma. <br>
# Since these estimations are consistent and not biased, the estimation will become close to the real value as the number of samples become large enough.
# #### g) Simultaneously diagonalise $\sigma$ and $\hat{\sigma}$ , and form a vector $ V = [\lambda_1, \lambda_2]^T $
#
# In[11]:
real_sigma = np.array([[2,1],[1,3]])
# First, whiten the estimated sigma:
estimated_sigma = estimated_var
w, v = linalg.eig(estimated_sigma)
normalizer = np.diag(np.sqrt(1/w))
first_transformation = np.matmul(v, normalizer)
print ("first transformation")
print (first_transformation)
| plt.xlim([-10, 10])
plt.ylim([-10, 10])
ax = plt.gca()
ax.spines['top'].set_color('none')
ax.spines['bottom'].set_position('zero')
ax.spines['left'].set_position('zero')
ax.spines['right'].set_color('none') | identifier_body |
p6.py | samples[:,1], s=0.2)
plt.show()
bins= 20
(counts, x_bins, y_bins) = np.histogram2d(samples[:, 0], samples[:, 1], bins=bins)
plt.contourf(counts, extent=[x_bins[0], x_bins[-1], y_bins[0], y_bins[-1]])
plt.show()
samples_for_f = samples
# #### c. Consider a normal distribution specified by the following parameters:
# $ n = 2, N = 500, M = \begin{bmatrix}
# m_1 \\
# m_2
# \end{bmatrix}, \Sigma = \begin{bmatrix}
# \sigma_{11} & \sigma_{12} \\
# \sigma_{21} & \sigma_{22}
# \end{bmatrix}$ <br>
# Determine appropriate values for each of the unknown variables, so that the shape of the
# distribution becomes: <br>
# > c.1) A circle in the upper left of the Euclidean coordinate system. <br>
# > c.2) A diagonal line (/ shape) in the centre<br>
# > c.3) A horizontal ellipsoid in the lower right of the Euclidean coordinate system<br>
#
#
# Display the generated samples. <br>
# c.1) A circle in the upper left of the Euclidean coordinate system:
# $$ m_1 < 0 , m_2 > 0, \Sigma = I $$
# In[4]:
def setting_function ():
plt.xlim([-10, 10])
plt.ylim([-10, 10])
ax = plt.gca()
ax.spines['top'].set_color('none')
ax.spines['bottom'].set_position('zero')
ax.spines['left'].set_position('zero')
ax.spines['right'].set_color('none')
# In[5]:
N =5000 # we changed N, because N=500 was too small for being visualized well
samples = np.random.multivariate_normal(np.array([-5,5]), np.array([[1,0],[0,1]]), N)
setting_function()
plt.scatter (samples[:,0], samples[:,1], s=0.2)
plt.show()
# c.2) A diagonal line (/ shape) in the centre<br>
# We have to select M to be $(0,0)$ for being in the center. <br>
# to make the distribution like a line /, we will choose $Sigma$ so that its biggest eigenvector of $\Sigma$ points to the / direction (parallel to $vector=(1,1))$ and its second eigen vector points to $(-1, 1)$. <br>
# To make the distribution similar to a diogional line, we will choose $\lambda_1=10, \lambda_2=1 $
#
# In[6]:
N=10000
setting_function()
samples = np.random.multivariate_normal(np.array([0,0]), np.array([[11/2,9/2],[9/2,11/2]]), N)
plt.scatter (samples[:,0], samples[:,1], s=0.2)
plt.show()
# c.3) A horizontal ellipsoid in the lower right of the Euclidean coordinate system
#
# In[7]:
N=10000
setting_function()
samples = np.random.multivariate_normal(np.array([5,-5]), np.array([[2.5, 0],[0,1]]), N)
plt.scatter (samples[:,0], samples[:,1], s=0.2)
plt.show()
# #### d) Consider a random variable with
# $ n = 2, N = 500, M = \begin{bmatrix}
# 2 \\
# 3
# \end{bmatrix}, \Sigma = \begin{bmatrix}
# 1 & 2\rho \\
# 2 \rho & 4
# \end{bmatrix}$ <br>
# #### compute $d^2(x)$ analytically, if the parameters are:
# $$ \rho = \{-0.99, -0.5, 0.5, 0.99\} $$
# If $ \Sigma^{-1} = \begin{bmatrix}
# a & b \\
# c & d
# \end{bmatrix}$ <br> then:
# $ d^2(x)= (x-m)^T \Sigma^{-1} (x-m) = ax_1^2 + (-4a -3(c+d))x_1 + (-6d -2(c+d))x_2 + (c+d)x_1x_2 + dx_2^2 $ <br>
# for $\rho =-0.99$:
# $$ d^2(x)= 50x_1^2 + (-350)x_1 + (-75)x_2 + 50 x_1x_2 + 12.5x_2^2 $$
# In[8]:
levels=[4,9,16]
plt.xlim(-10,10)
plt.ylim(-10,10)
X,Y = np.mgrid[-10:10.1:0.1, -10:10.1:0.1]
xy = np.vstack((X.flatten(), Y.flatten())).T
Sigmas = []
vals = [-0.99, -0.5, 0.5, 0.99]
for val in vals:
Sigmas.append(np.array([[1,2*val],[2*val,4]]))
mu = np.array([2,3]) #np.array([0,0])# np.array([2,3])
def d_squared (xy, sigma, mu):
matrix = inv(sigma)
a = xy - mu
t = np.matmul(a,matrix)
return np.matmul(t, a.T)
for sigma in Sigmas:
print ("Sigma is")
print (sigma)
print ("Formula")
t= inv(sigma)
a = t[0][0]
b = t[0][1]
c = t[1][0]
d = t[1][1]
s = str(a) + "x^2 + " + str(-4*a-3*(c+d)) + "x +" + str(-6*d-2*(c+d)) + "y + " + str(c+d) + "xy + " + str(d) + "y^2 = 0"
print (s)
Z = np.apply_along_axis(func1d = d_squared, axis= 1, arr = xy, sigma=sigma, mu = mu)
Z = np.reshape(Z, (len(X), -1))
plt.contour(X,Y,Z, levels=[4,9,16])
plt.show()
# #### f) Calculate the sample mean $ \hat{M} $, and sample covariance matrix $\hat{\Sigma}$ of the distribution in part b., and comment on the results.
# In[9]:
#Just repeating part b:
N =5000
samples = np.random.multivariate_normal(np.array([2,1]), np.array([[2,1],[1,3]]), N)
plt.xlim([-7.5, 13.5])
plt.ylim([-10, 10])
plt.gca().set_aspect('equal')
plt.scatter (samples[:,0], samples[:,1], s=0.2)
plt.show()
bins= 10
plt.xlim([-7.5, 13.5])
plt.ylim([-10, 10])
plt.gca().set_aspect('equal')
(counts, x_bins, y_bins) = np.histogram2d(samples[:, 0], samples[:, 1], bins=bins)
plt.contourf(counts, extent=[x_bins[0], x_bins[-1], y_bins[0], y_bins[-1]])
plt.show()
# In[10]:
estimated_mean = np.mean(samples, axis=0)
print ("estimated mean", estimated_mean) | vec = np.array([samples[i]-estimated_mean])
tmp = np.matmul(vec.T, vec)
estimated_var += tmp
estimated_var /= (len(samples)-1)
print ("estimated sigma")
print (estimated_var)
print ("real sigma:")
print (np.array([[2,1],[1,3]]) )
# Comment: The estimated mean and sigma are close to real mean and real sigma. <br>
# Since these estimations are consistent and not biased, the estimation will become close to the real value as the number of samples become large enough.
# #### g) Simultaneously diagonalise $\sigma$ and $\hat{\sigma}$ , and form a vector $ V = [\lambda_1, \lambda_2]^T $
#
# In[11]:
real_sigma = np.array([[2,1],[1,3]])
# First, whiten the estimated sigma:
estimated_sigma = estimated_var
w, v = linalg.eig(estimated_sigma)
normalizer = np.diag(np.sqrt(1/w))
first_transformation = np.matmul(v, normalizer)
print ("first transformation")
print (first_transformation)
print | print ("real mean [2,1]")
estimated_var = 0
for i in range(0, len(samples)): | random_line_split |
twitch.py | ness: %s', r.status)
logger.error(await r.text())
except:
logger.error('Twitch baddness')
return None
async def get_user(client_id, user_id):
return await twitch_request(client_id, 'users', user_id)
async def | (client_id, game_id, user_id):
game = await twitch_request(client_id, 'games', game_id)
if game:
return game['name']
else:
headers = { 'Client-ID': client_id,
'Accept': 'application/vnd.twitchtv.v5+json' }
try:
async with aiohttp.get('https://api.twitch.tv/kraken/streams/%s' % user_id, headers=headers) as r:
if r.status == 200:
js = await r.json()
game_name = ['stream']['game']
if game_name == "":
game_name = 'Playing some videogames'
return game_name
else:
logger.error('Twitch Kraken HTTP badness: %s', r.status)
logger.error(await r.text())
except:
logger.error('Twitch Kraken baddness')
return 'Playing some videogames'
async def lookup_users(config, user_list):
headers = { 'Client-ID': config['twitch']['client-id'],
'Content-Type': 'application/json' }
async with aiohttp.get('https://api.twitch.tv/helix/users', headers=headers, params=user_list) as r:
if r.status == 200:
user_json = await r.json()
return user_json['data']
else:
logger.error("Username look-up fail %d" % r.status)
logger.error(await r.text())
return []
def ibzytime(hour, minute):
negative = False
hm = (hour * 60) + minute
if (hour < 11):
hm += (24 * 60)
hm -= (23 * 60)
if (hm < 0):
hm *= -1
negative = True
return '%s%02d:%02d EIT' % ('-' if negative else '+', (hm / 60), (hm % 60))
async def parse_streams(client, config, server, stream_data):
users_announced = []
try:
client_id = config['twitch']['client-id']
for live_data in stream_data['data']:
logger.debug(live_data)
if ('type' in live_data) and (live_data['type'] != 'live'):
logger.info('Ignoring VOD')
continue
# Was seeing some issues where the first notification had no language set, and then the second was sent
# with a different ID. Looks like Twitch may have fixed this, so commenting to prevent notifications
# being ignored.
#if ('language' in live_data) and (live_data['language'] == ''):
# logger.info("Ignoring live data with no language set")
# continue
start_time = dateutil.parser.parse(live_data['started_at'])
ourtz = pytz.timezone('Europe/London')
time_now = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
start_time_local = start_time.astimezone(ourtz)
time_diff = time_now - start_time
logger.info("Started %d:%02d Delay %s" % (start_time_local.hour, start_time_local.minute, time_diff))
user_id = live_data['user_id']
user = await get_user(client_id, user_id)
last_stream = None
if user_id in stream_state:
last_stream = stream_state[user_id]
game_title = await get_game_title(client_id, live_data['game_id'], user_id)
user_url = "https://twitch.tv/%s" % user['login']
embed = discord.Embed(title = user_url, url = user_url, color = 2207743)
embed.set_author(name = "I say, %s has gone live!" % user['display_name'], url = user_url)
embed.set_thumbnail(url = user['profile_image_url'])
embed.add_field(name = game_title, value = live_data['title'], inline = False)
if user['login'] == 'evenibzy':
embed.set_footer(text = ("Stream started %s" % ibzytime(start_time_local.hour, start_time_local.minute)))
else:
embed.set_footer(text = ("Stream started %d:%02d" % (start_time_local.hour, start_time_local.minute)))
channels = config['discord']['channels']
channel_name = channels['_default_']
delete = True
if user['login'] in channels:
channel_name = channels[user['login']]
delete = False
logger.debug("channel_name=%s" % channel_name)
channel = discord.utils.get(server.channels, name = channel_name)
try:
new_stream = {}
new_stream['message'] = await client.send_message(channel, embed = embed)
stream_state[user_id] = new_stream
users_announced.append(user['display_name'])
logger.debug('Sent %s:%s' % (user['login'], new_stream['message'].id))
if last_stream and delete:
logger.debug('Deleting %s:%s' % (user['login'], last_stream['message'].id))
try:
await client.delete_message(last_stream['message'])
except:
logger.exception('Delete failed')
elif not delete:
logger.debug('No delete on this stream')
else:
logger.debug('No prior stream to delete')
except:
logger.exception('Discord badness')
logger.error("channel_name=%s" % channel_name)
logger.error("embed=%s" % embed.to_dict())
except:
logger.exception('Stream badness')
return users_announced
async def sub_unsub_user(config, user_logins, subscribe, users = None):
headers = { 'Client-ID': config['twitch']['client-id'],
'Content-Type': 'application/json' }
# Post data for subscription request
sub_data = { "hub.mode": "subscribe" if subscribe else "unsubscribe",
"hub.lease_seconds": 864000,
"hub.secret": config['twitch']['secret']
}
if not users:
users = await lookup_users(config, list(map(lambda u: ('login', u), user_logins)))
user_names = ''
user_ids = []
# Send a (un)subcription request for each username
for user in users:
logger.info('%s: %s' % (user['display_name'], user['id']))
sub_data['hub.callback'] = "%s?lb3.server=%s&lb3.user_id=%s" % (config['twitch']['webhook_uri'], config['discord']['server'], user['id'])
sub_data['hub.topic'] = "https://api.twitch.tv/helix/streams?user_id=%s" % user['id']
# Send a (un)subcription request for each username
async with aiohttp.post('https://api.twitch.tv/helix/webhooks/hub', headers=headers, data=json.dumps(sub_data)) as r:
if r.status== 202:
logger.info('%s OK' % sub_data['hub.topic'])
user_names += ' %s' % user['display_name']
user_ids.append(user['id'])
else:
logger.error('Went wrong %d' % r.status)
logger.error(await r.text())
if len(user_ids) > 0:
if subscribe:
return ("Right-ho, I've asked those lovely chaps at Twitch to tell me when**%s** goes live" % user_names, user_ids)
else:
return ("Right-ho, I've asked those lovely chaps at Twitch stop telling me about**%s**" % user_names, user_ids)
return ("Sorry, old-bean. I couldn't find anyone.", None)
async def sub_user(config, user_logins):
return await sub_unsub_user(config, user_logins, True)
async def unsub_user(config, user_logins):
return await sub_unsub_user(config, user_logins, False)
async def announce_user(client, config, server, user_logins):
response = "Nothing doing, I'm afraid"
logger.info(user_logins)
headers = { 'Client-ID': config['twitch']['client-id'],
'Content-Type': 'application/json' }
params = list(map(lambda u: ('user_login', u), user_logins))
async with aiohttp.get('https://api.twitch.tv/helix/streams', headers=headers, params=params) as r:
if r.status == 200:
streams_json = await r.json()
users = await parse_streams(client, config, server, streams_json)
if len(users) > 0:
response = "Announced %s" % (' '.join(users))
return (response, None)
async def get_subs(config):
headers = { 'Authorization': 'Bearer %s' % config['twitch']['app-token'] }
get_more = True
user_ids = []
params = None
while get_more:
get_more = False
async with aiohttp.get('https://api.twitch.tv/helix/webhooks/subscriptions', headers=headers, params=params) as r:
if r.status == 200:
subs = await | get_game_title | identifier_name |
twitch.py | ness: %s', r.status)
logger.error(await r.text())
except:
logger.error('Twitch baddness')
return None
async def get_user(client_id, user_id):
return await twitch_request(client_id, 'users', user_id)
async def get_game_title(client_id, game_id, user_id):
|
async def lookup_users(config, user_list):
headers = { 'Client-ID': config['twitch']['client-id'],
'Content-Type': 'application/json' }
async with aiohttp.get('https://api.twitch.tv/helix/users', headers=headers, params=user_list) as r:
if r.status == 200:
user_json = await r.json()
return user_json['data']
else:
logger.error("Username look-up fail %d" % r.status)
logger.error(await r.text())
return []
def ibzytime(hour, minute):
negative = False
hm = (hour * 60) + minute
if (hour < 11):
hm += (24 * 60)
hm -= (23 * 60)
if (hm < 0):
hm *= -1
negative = True
return '%s%02d:%02d EIT' % ('-' if negative else '+', (hm / 60), (hm % 60))
async def parse_streams(client, config, server, stream_data):
users_announced = []
try:
client_id = config['twitch']['client-id']
for live_data in stream_data['data']:
logger.debug(live_data)
if ('type' in live_data) and (live_data['type'] != 'live'):
logger.info('Ignoring VOD')
continue
# Was seeing some issues where the first notification had no language set, and then the second was sent
# with a different ID. Looks like Twitch may have fixed this, so commenting to prevent notifications
# being ignored.
#if ('language' in live_data) and (live_data['language'] == ''):
# logger.info("Ignoring live data with no language set")
# continue
start_time = dateutil.parser.parse(live_data['started_at'])
ourtz = pytz.timezone('Europe/London')
time_now = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
start_time_local = start_time.astimezone(ourtz)
time_diff = time_now - start_time
logger.info("Started %d:%02d Delay %s" % (start_time_local.hour, start_time_local.minute, time_diff))
user_id = live_data['user_id']
user = await get_user(client_id, user_id)
last_stream = None
if user_id in stream_state:
last_stream = stream_state[user_id]
game_title = await get_game_title(client_id, live_data['game_id'], user_id)
user_url = "https://twitch.tv/%s" % user['login']
embed = discord.Embed(title = user_url, url = user_url, color = 2207743)
embed.set_author(name = "I say, %s has gone live!" % user['display_name'], url = user_url)
embed.set_thumbnail(url = user['profile_image_url'])
embed.add_field(name = game_title, value = live_data['title'], inline = False)
if user['login'] == 'evenibzy':
embed.set_footer(text = ("Stream started %s" % ibzytime(start_time_local.hour, start_time_local.minute)))
else:
embed.set_footer(text = ("Stream started %d:%02d" % (start_time_local.hour, start_time_local.minute)))
channels = config['discord']['channels']
channel_name = channels['_default_']
delete = True
if user['login'] in channels:
channel_name = channels[user['login']]
delete = False
logger.debug("channel_name=%s" % channel_name)
channel = discord.utils.get(server.channels, name = channel_name)
try:
new_stream = {}
new_stream['message'] = await client.send_message(channel, embed = embed)
stream_state[user_id] = new_stream
users_announced.append(user['display_name'])
logger.debug('Sent %s:%s' % (user['login'], new_stream['message'].id))
if last_stream and delete:
logger.debug('Deleting %s:%s' % (user['login'], last_stream['message'].id))
try:
await client.delete_message(last_stream['message'])
except:
logger.exception('Delete failed')
elif not delete:
logger.debug('No delete on this stream')
else:
logger.debug('No prior stream to delete')
except:
logger.exception('Discord badness')
logger.error("channel_name=%s" % channel_name)
logger.error("embed=%s" % embed.to_dict())
except:
logger.exception('Stream badness')
return users_announced
async def sub_unsub_user(config, user_logins, subscribe, users = None):
headers = { 'Client-ID': config['twitch']['client-id'],
'Content-Type': 'application/json' }
# Post data for subscription request
sub_data = { "hub.mode": "subscribe" if subscribe else "unsubscribe",
"hub.lease_seconds": 864000,
"hub.secret": config['twitch']['secret']
}
if not users:
users = await lookup_users(config, list(map(lambda u: ('login', u), user_logins)))
user_names = ''
user_ids = []
# Send a (un)subcription request for each username
for user in users:
logger.info('%s: %s' % (user['display_name'], user['id']))
sub_data['hub.callback'] = "%s?lb3.server=%s&lb3.user_id=%s" % (config['twitch']['webhook_uri'], config['discord']['server'], user['id'])
sub_data['hub.topic'] = "https://api.twitch.tv/helix/streams?user_id=%s" % user['id']
# Send a (un)subcription request for each username
async with aiohttp.post('https://api.twitch.tv/helix/webhooks/hub', headers=headers, data=json.dumps(sub_data)) as r:
if r.status== 202:
logger.info('%s OK' % sub_data['hub.topic'])
user_names += ' %s' % user['display_name']
user_ids.append(user['id'])
else:
logger.error('Went wrong %d' % r.status)
logger.error(await r.text())
if len(user_ids) > 0:
if subscribe:
return ("Right-ho, I've asked those lovely chaps at Twitch to tell me when**%s** goes live" % user_names, user_ids)
else:
return ("Right-ho, I've asked those lovely chaps at Twitch stop telling me about**%s**" % user_names, user_ids)
return ("Sorry, old-bean. I couldn't find anyone.", None)
async def sub_user(config, user_logins):
return await sub_unsub_user(config, user_logins, True)
async def unsub_user(config, user_logins):
return await sub_unsub_user(config, user_logins, False)
async def announce_user(client, config, server, user_logins):
response = "Nothing doing, I'm afraid"
logger.info(user_logins)
headers = { 'Client-ID': config['twitch']['client-id'],
'Content-Type': 'application/json' }
params = list(map(lambda u: ('user_login', u), user_logins))
async with aiohttp.get('https://api.twitch.tv/helix/streams', headers=headers, params=params) as r:
if r.status == 200:
streams_json = await r.json()
users = await parse_streams(client, config, server, streams_json)
if len(users) > 0:
response = "Announced %s" % (' '.join(users))
return (response, None)
async def get_subs(config):
headers = { 'Authorization': 'Bearer %s' % config['twitch']['app-token'] }
get_more = True
user_ids = []
params = None
while get_more:
get_more = False
async with aiohttp.get('https://api.twitch.tv/helix/webhooks/subscriptions', headers=headers, params=params) as r:
if r.status == 200:
subs = await | game = await twitch_request(client_id, 'games', game_id)
if game:
return game['name']
else:
headers = { 'Client-ID': client_id,
'Accept': 'application/vnd.twitchtv.v5+json' }
try:
async with aiohttp.get('https://api.twitch.tv/kraken/streams/%s' % user_id, headers=headers) as r:
if r.status == 200:
js = await r.json()
game_name = ['stream']['game']
if game_name == "":
game_name = 'Playing some videogames'
return game_name
else:
logger.error('Twitch Kraken HTTP badness: %s', r.status)
logger.error(await r.text())
except:
logger.error('Twitch Kraken baddness')
return 'Playing some videogames' | identifier_body |
twitch.py | ness: %s', r.status)
logger.error(await r.text())
except:
logger.error('Twitch baddness')
return None
async def get_user(client_id, user_id):
return await twitch_request(client_id, 'users', user_id)
async def get_game_title(client_id, game_id, user_id):
game = await twitch_request(client_id, 'games', game_id)
if game:
return game['name']
else:
headers = { 'Client-ID': client_id,
'Accept': 'application/vnd.twitchtv.v5+json' }
try:
async with aiohttp.get('https://api.twitch.tv/kraken/streams/%s' % user_id, headers=headers) as r:
if r.status == 200:
js = await r.json()
game_name = ['stream']['game']
if game_name == "":
game_name = 'Playing some videogames'
return game_name
else:
logger.error('Twitch Kraken HTTP badness: %s', r.status)
logger.error(await r.text())
except:
logger.error('Twitch Kraken baddness')
return 'Playing some videogames'
async def lookup_users(config, user_list):
headers = { 'Client-ID': config['twitch']['client-id'],
'Content-Type': 'application/json' }
async with aiohttp.get('https://api.twitch.tv/helix/users', headers=headers, params=user_list) as r:
if r.status == 200:
user_json = await r.json()
return user_json['data']
else:
logger.error("Username look-up fail %d" % r.status)
logger.error(await r.text())
return []
def ibzytime(hour, minute):
negative = False
hm = (hour * 60) + minute
if (hour < 11):
hm += (24 * 60)
hm -= (23 * 60)
if (hm < 0):
hm *= -1
negative = True
return '%s%02d:%02d EIT' % ('-' if negative else '+', (hm / 60), (hm % 60))
async def parse_streams(client, config, server, stream_data):
users_announced = []
try:
client_id = config['twitch']['client-id']
for live_data in stream_data['data']:
logger.debug(live_data)
if ('type' in live_data) and (live_data['type'] != 'live'):
logger.info('Ignoring VOD')
continue
# Was seeing some issues where the first notification had no language set, and then the second was sent
# with a different ID. Looks like Twitch may have fixed this, so commenting to prevent notifications
# being ignored.
#if ('language' in live_data) and (live_data['language'] == ''):
# logger.info("Ignoring live data with no language set")
# continue
start_time = dateutil.parser.parse(live_data['started_at'])
ourtz = pytz.timezone('Europe/London')
time_now = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
start_time_local = start_time.astimezone(ourtz)
time_diff = time_now - start_time
logger.info("Started %d:%02d Delay %s" % (start_time_local.hour, start_time_local.minute, time_diff))
user_id = live_data['user_id']
user = await get_user(client_id, user_id)
last_stream = None
if user_id in stream_state:
last_stream = stream_state[user_id]
game_title = await get_game_title(client_id, live_data['game_id'], user_id)
user_url = "https://twitch.tv/%s" % user['login']
embed = discord.Embed(title = user_url, url = user_url, color = 2207743)
embed.set_author(name = "I say, %s has gone live!" % user['display_name'], url = user_url)
embed.set_thumbnail(url = user['profile_image_url'])
embed.add_field(name = game_title, value = live_data['title'], inline = False)
if user['login'] == 'evenibzy':
embed.set_footer(text = ("Stream started %s" % ibzytime(start_time_local.hour, start_time_local.minute)))
else:
embed.set_footer(text = ("Stream started %d:%02d" % (start_time_local.hour, start_time_local.minute)))
channels = config['discord']['channels']
channel_name = channels['_default_']
delete = True
if user['login'] in channels:
channel_name = channels[user['login']]
delete = False
logger.debug("channel_name=%s" % channel_name)
channel = discord.utils.get(server.channels, name = channel_name)
try:
new_stream = {}
new_stream['message'] = await client.send_message(channel, embed = embed)
stream_state[user_id] = new_stream
users_announced.append(user['display_name'])
logger.debug('Sent %s:%s' % (user['login'], new_stream['message'].id))
if last_stream and delete:
logger.debug('Deleting %s:%s' % (user['login'], last_stream['message'].id))
try:
await client.delete_message(last_stream['message'])
except:
logger.exception('Delete failed')
elif not delete:
logger.debug('No delete on this stream')
else:
logger.debug('No prior stream to delete')
except:
logger.exception('Discord badness')
logger.error("channel_name=%s" % channel_name)
logger.error("embed=%s" % embed.to_dict())
except:
logger.exception('Stream badness')
return users_announced
async def sub_unsub_user(config, user_logins, subscribe, users = None):
headers = { 'Client-ID': config['twitch']['client-id'],
'Content-Type': 'application/json' }
# Post data for subscription request
sub_data = { "hub.mode": "subscribe" if subscribe else "unsubscribe",
"hub.lease_seconds": 864000,
"hub.secret": config['twitch']['secret']
}
if not users:
users = await lookup_users(config, list(map(lambda u: ('login', u), user_logins)))
user_names = ''
user_ids = []
# Send a (un)subcription request for each username
for user in users:
logger.info('%s: %s' % (user['display_name'], user['id']))
sub_data['hub.callback'] = "%s?lb3.server=%s&lb3.user_id=%s" % (config['twitch']['webhook_uri'], config['discord']['server'], user['id'])
sub_data['hub.topic'] = "https://api.twitch.tv/helix/streams?user_id=%s" % user['id']
# Send a (un)subcription request for each username
async with aiohttp.post('https://api.twitch.tv/helix/webhooks/hub', headers=headers, data=json.dumps(sub_data)) as r:
if r.status== 202:
logger.info('%s OK' % sub_data['hub.topic'])
user_names += ' %s' % user['display_name']
user_ids.append(user['id'])
else:
logger.error('Went wrong %d' % r.status)
logger.error(await r.text())
if len(user_ids) > 0:
if subscribe:
return ("Right-ho, I've asked those lovely chaps at Twitch to tell me when**%s** goes live" % user_names, user_ids)
else:
return ("Right-ho, I've asked those lovely chaps at Twitch stop telling me about**%s**" % user_names, user_ids)
return ("Sorry, old-bean. I couldn't find anyone.", None)
async def sub_user(config, user_logins):
return await sub_unsub_user(config, user_logins, True)
async def unsub_user(config, user_logins):
return await sub_unsub_user(config, user_logins, False)
async def announce_user(client, config, server, user_logins):
response = "Nothing doing, I'm afraid"
logger.info(user_logins)
headers = { 'Client-ID': config['twitch']['client-id'],
'Content-Type': 'application/json' }
params = list(map(lambda u: ('user_login', u), user_logins))
async with aiohttp.get('https://api.twitch.tv/helix/streams', headers=headers, params=params) as r:
if r.status == 200:
|
return (response, None)
async def get_subs(config):
headers = { 'Authorization': 'Bearer %s' % config['twitch']['app-token'] }
get_more = True
user_ids = []
params = None
while get_more:
get_more = False
async with aiohttp.get('https://api.twitch.tv/helix/webhooks/subscriptions', headers=headers, params=params) as r:
if r.status == 200:
subs = await | streams_json = await r.json()
users = await parse_streams(client, config, server, streams_json)
if len(users) > 0:
response = "Announced %s" % (' '.join(users)) | conditional_block |
twitch.py | badness: %s', r.status)
logger.error(await r.text())
except:
logger.error('Twitch baddness')
return None
async def get_user(client_id, user_id):
return await twitch_request(client_id, 'users', user_id)
async def get_game_title(client_id, game_id, user_id):
game = await twitch_request(client_id, 'games', game_id)
if game:
return game['name']
else:
headers = { 'Client-ID': client_id,
'Accept': 'application/vnd.twitchtv.v5+json' }
try:
async with aiohttp.get('https://api.twitch.tv/kraken/streams/%s' % user_id, headers=headers) as r:
if r.status == 200:
js = await r.json()
game_name = ['stream']['game']
if game_name == "":
game_name = 'Playing some videogames'
return game_name
else:
logger.error('Twitch Kraken HTTP badness: %s', r.status)
logger.error(await r.text())
except:
logger.error('Twitch Kraken baddness')
return 'Playing some videogames'
async def lookup_users(config, user_list):
headers = { 'Client-ID': config['twitch']['client-id'],
'Content-Type': 'application/json' }
async with aiohttp.get('https://api.twitch.tv/helix/users', headers=headers, params=user_list) as r:
if r.status == 200:
user_json = await r.json()
return user_json['data']
else:
logger.error("Username look-up fail %d" % r.status)
logger.error(await r.text())
return []
def ibzytime(hour, minute):
negative = False
hm = (hour * 60) + minute
if (hour < 11):
hm += (24 * 60)
hm -= (23 * 60)
if (hm < 0):
hm *= -1
negative = True
return '%s%02d:%02d EIT' % ('-' if negative else '+', (hm / 60), (hm % 60))
async def parse_streams(client, config, server, stream_data):
users_announced = []
try:
client_id = config['twitch']['client-id']
for live_data in stream_data['data']:
logger.debug(live_data)
if ('type' in live_data) and (live_data['type'] != 'live'):
logger.info('Ignoring VOD')
continue
# Was seeing some issues where the first notification had no language set, and then the second was sent
# with a different ID. Looks like Twitch may have fixed this, so commenting to prevent notifications
# being ignored.
#if ('language' in live_data) and (live_data['language'] == ''):
# logger.info("Ignoring live data with no language set")
# continue
start_time = dateutil.parser.parse(live_data['started_at'])
ourtz = pytz.timezone('Europe/London')
time_now = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
start_time_local = start_time.astimezone(ourtz)
time_diff = time_now - start_time
logger.info("Started %d:%02d Delay %s" % (start_time_local.hour, start_time_local.minute, time_diff))
user_id = live_data['user_id']
user = await get_user(client_id, user_id)
last_stream = None
if user_id in stream_state:
last_stream = stream_state[user_id]
game_title = await get_game_title(client_id, live_data['game_id'], user_id)
user_url = "https://twitch.tv/%s" % user['login']
embed = discord.Embed(title = user_url, url = user_url, color = 2207743)
embed.set_author(name = "I say, %s has gone live!" % user['display_name'], url = user_url)
embed.set_thumbnail(url = user['profile_image_url'])
embed.add_field(name = game_title, value = live_data['title'], inline = False)
if user['login'] == 'evenibzy':
embed.set_footer(text = ("Stream started %s" % ibzytime(start_time_local.hour, start_time_local.minute)))
else:
embed.set_footer(text = ("Stream started %d:%02d" % (start_time_local.hour, start_time_local.minute)))
channels = config['discord']['channels']
channel_name = channels['_default_']
delete = True
if user['login'] in channels:
channel_name = channels[user['login']]
delete = False
logger.debug("channel_name=%s" % channel_name)
channel = discord.utils.get(server.channels, name = channel_name)
try:
new_stream = {}
new_stream['message'] = await client.send_message(channel, embed = embed)
stream_state[user_id] = new_stream
users_announced.append(user['display_name'])
logger.debug('Sent %s:%s' % (user['login'], new_stream['message'].id))
if last_stream and delete:
logger.debug('Deleting %s:%s' % (user['login'], last_stream['message'].id))
try:
await client.delete_message(last_stream['message'])
except:
logger.exception('Delete failed')
elif not delete:
logger.debug('No delete on this stream')
else:
logger.debug('No prior stream to delete')
except:
logger.exception('Discord badness')
logger.error("channel_name=%s" % channel_name)
logger.error("embed=%s" % embed.to_dict())
except:
logger.exception('Stream badness')
return users_announced
async def sub_unsub_user(config, user_logins, subscribe, users = None):
headers = { 'Client-ID': config['twitch']['client-id'],
'Content-Type': 'application/json' }
# Post data for subscription request
sub_data = { "hub.mode": "subscribe" if subscribe else "unsubscribe",
"hub.lease_seconds": 864000,
"hub.secret": config['twitch']['secret']
}
if not users:
users = await lookup_users(config, list(map(lambda u: ('login', u), user_logins)))
user_names = '' | sub_data['hub.topic'] = "https://api.twitch.tv/helix/streams?user_id=%s" % user['id']
# Send a (un)subcription request for each username
async with aiohttp.post('https://api.twitch.tv/helix/webhooks/hub', headers=headers, data=json.dumps(sub_data)) as r:
if r.status== 202:
logger.info('%s OK' % sub_data['hub.topic'])
user_names += ' %s' % user['display_name']
user_ids.append(user['id'])
else:
logger.error('Went wrong %d' % r.status)
logger.error(await r.text())
if len(user_ids) > 0:
if subscribe:
return ("Right-ho, I've asked those lovely chaps at Twitch to tell me when**%s** goes live" % user_names, user_ids)
else:
return ("Right-ho, I've asked those lovely chaps at Twitch stop telling me about**%s**" % user_names, user_ids)
return ("Sorry, old-bean. I couldn't find anyone.", None)
async def sub_user(config, user_logins):
return await sub_unsub_user(config, user_logins, True)
async def unsub_user(config, user_logins):
return await sub_unsub_user(config, user_logins, False)
async def announce_user(client, config, server, user_logins):
response = "Nothing doing, I'm afraid"
logger.info(user_logins)
headers = { 'Client-ID': config['twitch']['client-id'],
'Content-Type': 'application/json' }
params = list(map(lambda u: ('user_login', u), user_logins))
async with aiohttp.get('https://api.twitch.tv/helix/streams', headers=headers, params=params) as r:
if r.status == 200:
streams_json = await r.json()
users = await parse_streams(client, config, server, streams_json)
if len(users) > 0:
response = "Announced %s" % (' '.join(users))
return (response, None)
async def get_subs(config):
headers = { 'Authorization': 'Bearer %s' % config['twitch']['app-token'] }
get_more = True
user_ids = []
params = None
while get_more:
get_more = False
async with aiohttp.get('https://api.twitch.tv/helix/webhooks/subscriptions', headers=headers, params=params) as r:
if r.status == 200:
subs = await | user_ids = []
# Send a (un)subcription request for each username
for user in users:
logger.info('%s: %s' % (user['display_name'], user['id']))
sub_data['hub.callback'] = "%s?lb3.server=%s&lb3.user_id=%s" % (config['twitch']['webhook_uri'], config['discord']['server'], user['id']) | random_line_split |
msg.go | riters = w
}
// Set the time format. If it is empty, set the default.
if t == "" {
obj.TimeFormat = "2006-01-02 15:05:05.000 MST"
} else {
obj.TimeFormat = t
}
// Set the format. If it is empty use the default.
if f == "" {
obj.Format = `%(-27)time %(-7)type %file %line - %msg`
} else {
obj.Format = f
}
// Parse the format.
ofmt, oflds, err := ParseFormatString(obj.Format)
obj.outputFormat = ofmt
obj.outputFlds = oflds
return
}
/*
Debug prints a debug message obtaining the callers filename, function and
line number.
It automatically appends a new line.
Example:
msg.Debug("%v = %v", key, value)
*/
func (o Object) Debug(f string, a ...interface{}) {
if o.DebugEnabled {
o.PrintMsg("DEBUG", 2, f, a...)
}
}
/*
DebugWithLevel prints a debug message obtaining the filename, function and
line number from the caller specified by level "l". l=2 is the same
as Debug().
It automatically appends a new line.
Example:
msg.DebugWithLevel(2, "%v = %v", key, value)
*/
func (o Object) DebugWithLevel(l int, f string, a ...interface{}) {
if o.DebugEnabled {
o.PrintMsg("DEBUG", l, f, a...)
}
}
/*
Info prints an info message obtaining the callers filename, function and
line number.
It automatically appends a new line.
Example:
msg.Info("%v = %v", key, value)
*/
func (o Object) Info(f string, a ...interface{}) {
if o.InfoEnabled {
o.PrintMsg("INFO", 2, f, a...)
}
}
/*
InfoWithLevel prints an info message obtaining the filename, function and
line number from the caller specified by level "l". l=2 is the same
as Debug().
It automatically appends a new line.
Example:
msg.InfoWithLevel(2, "%v = %v", key, value)
*/
func (o Object) InfoWithLevel(l int, f string, a ...interface{}) {
if o.InfoEnabled {
o.PrintMsg("INFO", l, f, a...)
}
}
/*
Warn prints a warning message obtaining the callers filename, function and
line number.
It automatically appends a new line.
Example:
msg.Warn("%v = %v", key, value)
*/
func (o Object) Warn(f string, a ...interface{}) {
if o.WarningEnabled {
o.PrintMsg("WARNING", 2, f, a...)
}
}
/*
WarnWithLevel prints a warning message obtaining the filename, function and
line number from the caller specified by level "l". l=2 is the same
as Debug().
It automatically appends a new line.
Example:
msg.WarnWithLevel(2, "%v = %v", key, value)
*/
func (o Object) WarnWithLevel(l int, f string, a ...interface{}) {
if o.WarningEnabled {
o.PrintMsg("WARNING", 2, f, a...)
}
}
/*
Err prints an error message obtaining the callers filename, function and
line number and exits. It cannot be disabled.
It automatically appends a new line.
Example:
msg.Err("%v = %v", key, value)
*/
func (o Object) Err(f string, a ...interface{}) {
o.PrintMsg("ERROR", 2, f, a...)
os.Exit(o.ErrorExitCode)
}
/*
ErrWithLevel prints an error message obtaining the filename, function and
line number from the caller specified by level "l". l=2 is the same
as Debug() and exits. It cannot be disabled.
It automatically appends a new line.
Example:
msg.ErrWithLevel(2, "%v = %v", key, value)
*/
func (o Object) ErrWithLevel(l int, f string, a ...interface{}) {
o.PrintMsg("ERROR", 2, f, a...)
os.Exit(o.ErrorExitCode)
}
/*
ErrNoExit prints an error message obtaining the callers filename, function and
line number. It does not exit and cannot be disabled.
It automatically appends a new line.
Example:
msg.ErrNoExit("%v = %v", key, value)
*/
func (o Object) ErrNoExit(f string, a ...interface{}) {
o.PrintMsg("ERROR", 2, f, a...)
}
/*
ErrNoExitWithLevel prints an error message obtaining the filename, function and
line number from the caller specified by level "l". l=2 is the same
as Debug(). It does not exit and cannot be disabled.
It automatically appends a new line.
Example:
msg.ErrNoExitWithLevel(2, "%v = %v", key, value)
*/
func (o Object) ErrNoExitWithLevel(l int, f string, a ...interface{}) {
o.PrintMsg("ERROR", 2, f, a...)
}
/*
Printf prints directly to the log without the format string.
It allows you to insert arbitrary text.
Unlike the other functions it does not automatically append a new line.
Example:
msg.Printf("this is just random text that goes to all writers\n")
*/
func (o Object) Printf(f string, a ...interface{}) {
// Create the formatted output string.
s := fmt.Sprintf(f, a...)
// Output it for each writer.
for _, w := range o.Writers {
fmt.Fprintf(w, s)
}
}
/*
PrintMsg is the basis of all message printers except Printf. It prints the
formatted messages and normally would not be called directly.
t - is the type, normally one of DEBUG, INFO, WARNING or ERROR
l - is the caller level: 0 is this function, 1 is the caller, 2 is the callers caller and so on
f - format string
a - argument list
*/
func (o Object) PrintMsg(t string, l int, f string, a ...interface{}) {
pc, fname, lineno, _ := runtime.Caller(l)
fct := runtime.FuncForPC(pc).Name()
fname = path.Base(fname[0 : len(fname)-3]) // strip off ".go"
// The variables map for the format string.
m := map[string]string{
"file": fname,
"func": fct,
"line": strconv.Itoa(lineno),
"msg": fmt.Sprintf(f, a...),
"pkg": o.Name,
"time": time.Now().Truncate(time.Millisecond).Format(o.TimeFormat),
"utc": time.Now().UTC().Truncate(time.Millisecond).Format(o.TimeFormat),
"type": t,
}
// Collect the field values.
var flds []interface{}
for _, k := range o.outputFlds {
if v, ok := m[k]; ok {
flds = append(flds, v)
} else {
// This is, essentially, an assert. It should never happen.
fmt.Fprintf(os.Stderr, "ERROR: unexpected condition, invalid specification id '%v'\n", k)
os.Exit(1)
}
}
// Create the formatted output string.
s := fmt.Sprintf(o.outputFormat, flds...) + "\n"
// Output it for each writer.
for _, w := range o.Writers {
_, err := fmt.Fprintf(w, s)
if err != nil {
fmt.Fprintf(os.Stderr, `
FATAL: fmt.Fprintf() failed for writer %v
call stack = %v %v %v
output = %v
error = %v
`, w, m["file"], m["func"], m["line"], s[:len(s)-2], err)
os.Exit(1)
}
}
}
/*
ParseFormatString transforms a format template to a format string
and the list of fields to print in each message.
It is meant to be used internally by NewMsg().
Here is an example transformation:
input = "MYSTUFF %(-27)time %(-7)type %file %line - %msg"
// TRANSFORM
ofmt = "MYSTUFF %-27v %-7v %v %v - %v"
oids = ["time", "type", "type", "file", "line", "msg"]
*/
func ParseFormatString(input string) (ofmt string, oids []string, err error) {
ofmtb := []byte{}
valid := []string{"file", "func", "line", "msg", "pkg", "time", "type", "utc"}
ics := []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-$")
// Define the parse states.
// normal - capture each byte
// spec - capture a specification of the form %<id> or %(<fmt>)<id>.
type state int
const (
normal state = iota
spec
)
s := normal
ib := []byte(input)
for i := 0; i < len(ib); i++ {
b := ib[i]
switch s {
case normal:
// normal state, this is all of the stuff in the
// template that is not part of a specification.
if b == '%' | {
s = spec
} | conditional_block |
|
msg.go | string, a ...interface{})
Err(f string, a ...interface{})
ErrNoExit(f string, a ...interface{})
DebugWithLevel(l int, f string, a ...interface{})
InfoWithLevel(l int, f string, a ...interface{})
WarnWithLevel(l int, f string, a ...interface{})
ErrWithLevel(l int, f string, a ...interface{})
ErrNoExitWithLevel(l int, f string, a ...interface{})
Printf(f string, a ...interface{})
}
// Object defines the logger.
type Object struct {
// Name is the package name. It is accessed in the format string by %pkg.
Name string
// DebugEnabled enables debug messages if true.
// It is true by default.
DebugEnabled bool
// InfoEnabled enables info messages if true.
// It is true by default.
InfoEnabled bool
// WarningEnabled enables warning messages if true.
// It is true by default.
WarningEnabled bool
// ErrorExitCode is the exit code to use for the Error function.
// The default is 1.
ErrorExitCode int
// Writers for the message output.
// If no writers are specified, messages go to os.Stdout.
Writers []io.Writer
// TimeFormat is the format of the prefix timestamp.
// See time.Format for details.
// The default format is: "2006-01-02 15:05:05.000 MST"
TimeFormat string
// Format is the template for the output. It has the following specifiers.
//
// %file is the caller file name
// %func is the function name
// %line is the line number
// %msg is the actual message
// %pkg is the package name
// %time is the time format in the current locale
// %utc is the time format in the UTC locale
// %type is the msg type: DEBUG, INFO, WARNING, ERROR
// %% is a single % character
//
// You can explicitly format each field by specifying the formatting
// options in parentheses.
//
// %(-28)time
//
// Any other text is left verbatim.
//
// The default format is.
// `%(-27)time %(-7)type %file %line - %msg`
Format string
// outputFormat created by NewMsg and used to generate a message.
outputFormat string
// outputFlds created by NewMsg and used to specify the fields.
outputFlds []string
}
// NewMsg makes a message object.
// n - package name
// f - format string, set to "" to get the default.
// t - time stamp format, set to "" to get the default
// w - the list of writers, if empty all messages go to stdout
func NewMsg(n string, f string, t string, w []io.Writer) (obj *Object, err error) {
obj = new(Object)
obj.Name = n
obj.DebugEnabled = true
obj.InfoEnabled = true
obj.WarningEnabled = true
obj.ErrorExitCode = 1
if len(w) == 0 {
obj.Writers = append(obj.Writers, os.Stdout)
} else {
obj.Writers = w
}
// Set the time format. If it is empty, set the default.
if t == "" {
obj.TimeFormat = "2006-01-02 15:05:05.000 MST"
} else {
obj.TimeFormat = t
}
// Set the format. If it is empty use the default.
if f == "" {
obj.Format = `%(-27)time %(-7)type %file %line - %msg`
} else {
obj.Format = f
}
// Parse the format.
ofmt, oflds, err := ParseFormatString(obj.Format)
obj.outputFormat = ofmt
obj.outputFlds = oflds
return
}
/*
Debug prints a debug message obtaining the callers filename, function and
line number.
It automatically appends a new line.
Example:
msg.Debug("%v = %v", key, value)
*/
func (o Object) Debug(f string, a ...interface{}) {
if o.DebugEnabled {
o.PrintMsg("DEBUG", 2, f, a...)
}
}
/*
DebugWithLevel prints a debug message obtaining the filename, function and
line number from the caller specified by level "l". l=2 is the same
as Debug().
It automatically appends a new line.
Example:
msg.DebugWithLevel(2, "%v = %v", key, value)
*/
func (o Object) DebugWithLevel(l int, f string, a ...interface{}) {
if o.DebugEnabled {
o.PrintMsg("DEBUG", l, f, a...)
}
}
/*
Info prints an info message obtaining the callers filename, function and
line number.
It automatically appends a new line.
Example:
msg.Info("%v = %v", key, value)
*/
func (o Object) Info(f string, a ...interface{}) {
if o.InfoEnabled {
o.PrintMsg("INFO", 2, f, a...)
}
}
/*
InfoWithLevel prints an info message obtaining the filename, function and
line number from the caller specified by level "l". l=2 is the same
as Debug().
It automatically appends a new line.
Example:
msg.InfoWithLevel(2, "%v = %v", key, value)
*/
func (o Object) InfoWithLevel(l int, f string, a ...interface{}) {
if o.InfoEnabled {
o.PrintMsg("INFO", l, f, a...)
}
}
/*
Warn prints a warning message obtaining the callers filename, function and
line number.
It automatically appends a new line.
Example:
msg.Warn("%v = %v", key, value)
*/
func (o Object) Warn(f string, a ...interface{}) {
if o.WarningEnabled {
o.PrintMsg("WARNING", 2, f, a...)
}
}
/*
WarnWithLevel prints a warning message obtaining the filename, function and
line number from the caller specified by level "l". l=2 is the same
as Debug().
It automatically appends a new line.
Example:
msg.WarnWithLevel(2, "%v = %v", key, value)
*/
func (o Object) WarnWithLevel(l int, f string, a ...interface{}) {
if o.WarningEnabled {
o.PrintMsg("WARNING", 2, f, a...)
}
}
/*
Err prints an error message obtaining the callers filename, function and
line number and exits. It cannot be disabled.
It automatically appends a new line.
Example:
msg.Err("%v = %v", key, value)
*/
func (o Object) Err(f string, a ...interface{}) {
o.PrintMsg("ERROR", 2, f, a...)
os.Exit(o.ErrorExitCode)
}
/*
ErrWithLevel prints an error message obtaining the filename, function and
line number from the caller specified by level "l". l=2 is the same
as Debug() and exits. It cannot be disabled.
It automatically appends a new line.
Example:
msg.ErrWithLevel(2, "%v = %v", key, value)
*/
func (o Object) ErrWithLevel(l int, f string, a ...interface{}) {
o.PrintMsg("ERROR", 2, f, a...)
os.Exit(o.ErrorExitCode)
}
/*
ErrNoExit prints an error message obtaining the callers filename, function and
line number. It does not exit and cannot be disabled.
It automatically appends a new line.
Example:
msg.ErrNoExit("%v = %v", key, value)
*/
func (o Object) ErrNoExit(f string, a ...interface{}) {
o.PrintMsg("ERROR", 2, f, a...)
}
/*
ErrNoExitWithLevel prints an error message obtaining the filename, function and
line number from the caller specified by level "l". l=2 is the same
as Debug(). It does not exit and cannot be disabled.
It automatically appends a new line.
Example:
msg.ErrNoExitWithLevel(2, "%v = %v", key, value)
*/
func (o Object) ErrNoExitWithLevel(l int, f string, a ...interface{}) {
o.PrintMsg("ERROR", 2, f, a...)
} | It allows you to insert arbitrary text.
Unlike the other functions it does not automatically append a new line.
Example:
msg.Printf("this is just random text that goes to all writers\n")
*/
func (o Object) Printf(f string, a ...interface{}) {
// Create the formatted output string.
s := fmt.Sprintf(f, a...)
// Output it for each writer.
for _, w := range o.Writers {
fmt.Fprintf(w, s)
}
}
/*
PrintMsg is the basis of all message printers except Printf. It prints the
formatted messages and normally would not be called directly.
t - is the type, normally one of DEBUG, INFO, WARNING or ERROR
l - is the caller level: 0 is this function, 1 is the caller, 2 is the callers caller and so on
f - format string
a - argument list
*/
func (o Object) PrintMsg(t string, l int, f string, a ... |
/*
Printf prints directly to the log without the format string. | random_line_split |
msg.go | string, a ...interface{})
Err(f string, a ...interface{})
ErrNoExit(f string, a ...interface{})
DebugWithLevel(l int, f string, a ...interface{})
InfoWithLevel(l int, f string, a ...interface{})
WarnWithLevel(l int, f string, a ...interface{})
ErrWithLevel(l int, f string, a ...interface{})
ErrNoExitWithLevel(l int, f string, a ...interface{})
Printf(f string, a ...interface{})
}
// Object defines the logger.
type Object struct {
// Name is the package name. It is accessed in the format string by %pkg.
Name string
// DebugEnabled enables debug messages if true.
// It is true by default.
DebugEnabled bool
// InfoEnabled enables info messages if true.
// It is true by default.
InfoEnabled bool
// WarningEnabled enables warning messages if true.
// It is true by default.
WarningEnabled bool
// ErrorExitCode is the exit code to use for the Error function.
// The default is 1.
ErrorExitCode int
// Writers for the message output.
// If no writers are specified, messages go to os.Stdout.
Writers []io.Writer
// TimeFormat is the format of the prefix timestamp.
// See time.Format for details.
// The default format is: "2006-01-02 15:05:05.000 MST"
TimeFormat string
// Format is the template for the output. It has the following specifiers.
//
// %file is the caller file name
// %func is the function name
// %line is the line number
// %msg is the actual message
// %pkg is the package name
// %time is the time format in the current locale
// %utc is the time format in the UTC locale
// %type is the msg type: DEBUG, INFO, WARNING, ERROR
// %% is a single % character
//
// You can explicitly format each field by specifying the formatting
// options in parentheses.
//
// %(-28)time
//
// Any other text is left verbatim.
//
// The default format is.
// `%(-27)time %(-7)type %file %line - %msg`
Format string
// outputFormat created by NewMsg and used to generate a message.
outputFormat string
// outputFlds created by NewMsg and used to specify the fields.
outputFlds []string
}
// NewMsg makes a message object.
// n - package name
// f - format string, set to "" to get the default.
// t - time stamp format, set to "" to get the default
// w - the list of writers, if empty all messages go to stdout
func NewMsg(n string, f string, t string, w []io.Writer) (obj *Object, err error) {
obj = new(Object)
obj.Name = n
obj.DebugEnabled = true
obj.InfoEnabled = true
obj.WarningEnabled = true
obj.ErrorExitCode = 1
if len(w) == 0 {
obj.Writers = append(obj.Writers, os.Stdout)
} else {
obj.Writers = w
}
// Set the time format. If it is empty, set the default.
if t == "" {
obj.TimeFormat = "2006-01-02 15:05:05.000 MST"
} else {
obj.TimeFormat = t
}
// Set the format. If it is empty use the default.
if f == "" {
obj.Format = `%(-27)time %(-7)type %file %line - %msg`
} else {
obj.Format = f
}
// Parse the format.
ofmt, oflds, err := ParseFormatString(obj.Format)
obj.outputFormat = ofmt
obj.outputFlds = oflds
return
}
/*
Debug prints a debug message obtaining the callers filename, function and
line number.
It automatically appends a new line.
Example:
msg.Debug("%v = %v", key, value)
*/
func (o Object) Debug(f string, a ...interface{}) {
if o.DebugEnabled {
o.PrintMsg("DEBUG", 2, f, a...)
}
}
/*
DebugWithLevel prints a debug message obtaining the filename, function and
line number from the caller specified by level "l". l=2 is the same
as Debug().
It automatically appends a new line.
Example:
msg.DebugWithLevel(2, "%v = %v", key, value)
*/
func (o Object) DebugWithLevel(l int, f string, a ...interface{}) {
if o.DebugEnabled {
o.PrintMsg("DEBUG", l, f, a...)
}
}
/*
Info prints an info message obtaining the callers filename, function and
line number.
It automatically appends a new line.
Example:
msg.Info("%v = %v", key, value)
*/
func (o Object) | (f string, a ...interface{}) {
if o.InfoEnabled {
o.PrintMsg("INFO", 2, f, a...)
}
}
/*
InfoWithLevel prints an info message obtaining the filename, function and
line number from the caller specified by level "l". l=2 is the same
as Debug().
It automatically appends a new line.
Example:
msg.InfoWithLevel(2, "%v = %v", key, value)
*/
func (o Object) InfoWithLevel(l int, f string, a ...interface{}) {
if o.InfoEnabled {
o.PrintMsg("INFO", l, f, a...)
}
}
/*
Warn prints a warning message obtaining the callers filename, function and
line number.
It automatically appends a new line.
Example:
msg.Warn("%v = %v", key, value)
*/
func (o Object) Warn(f string, a ...interface{}) {
if o.WarningEnabled {
o.PrintMsg("WARNING", 2, f, a...)
}
}
/*
WarnWithLevel prints a warning message obtaining the filename, function and
line number from the caller specified by level "l". l=2 is the same
as Debug().
It automatically appends a new line.
Example:
msg.WarnWithLevel(2, "%v = %v", key, value)
*/
func (o Object) WarnWithLevel(l int, f string, a ...interface{}) {
if o.WarningEnabled {
o.PrintMsg("WARNING", 2, f, a...)
}
}
/*
Err prints an error message obtaining the callers filename, function and
line number and exits. It cannot be disabled.
It automatically appends a new line.
Example:
msg.Err("%v = %v", key, value)
*/
func (o Object) Err(f string, a ...interface{}) {
o.PrintMsg("ERROR", 2, f, a...)
os.Exit(o.ErrorExitCode)
}
/*
ErrWithLevel prints an error message obtaining the filename, function and
line number from the caller specified by level "l". l=2 is the same
as Debug() and exits. It cannot be disabled.
It automatically appends a new line.
Example:
msg.ErrWithLevel(2, "%v = %v", key, value)
*/
func (o Object) ErrWithLevel(l int, f string, a ...interface{}) {
o.PrintMsg("ERROR", 2, f, a...)
os.Exit(o.ErrorExitCode)
}
/*
ErrNoExit prints an error message obtaining the callers filename, function and
line number. It does not exit and cannot be disabled.
It automatically appends a new line.
Example:
msg.ErrNoExit("%v = %v", key, value)
*/
func (o Object) ErrNoExit(f string, a ...interface{}) {
o.PrintMsg("ERROR", 2, f, a...)
}
/*
ErrNoExitWithLevel prints an error message obtaining the filename, function and
line number from the caller specified by level "l". l=2 is the same
as Debug(). It does not exit and cannot be disabled.
It automatically appends a new line.
Example:
msg.ErrNoExitWithLevel(2, "%v = %v", key, value)
*/
func (o Object) ErrNoExitWithLevel(l int, f string, a ...interface{}) {
o.PrintMsg("ERROR", 2, f, a...)
}
/*
Printf prints directly to the log without the format string.
It allows you to insert arbitrary text.
Unlike the other functions it does not automatically append a new line.
Example:
msg.Printf("this is just random text that goes to all writers\n")
*/
func (o Object) Printf(f string, a ...interface{}) {
// Create the formatted output string.
s := fmt.Sprintf(f, a...)
// Output it for each writer.
for _, w := range o.Writers {
fmt.Fprintf(w, s)
}
}
/*
PrintMsg is the basis of all message printers except Printf. It prints the
formatted messages and normally would not be called directly.
t - is the type, normally one of DEBUG, INFO, WARNING or ERROR
l - is the caller level: 0 is this function, 1 is the caller, 2 is the callers caller and so on
f - format string
a - argument list
*/
func (o Object) PrintMsg(t string, l int, f string, a ... | Info | identifier_name |
msg.go | string, a ...interface{})
Err(f string, a ...interface{})
ErrNoExit(f string, a ...interface{})
DebugWithLevel(l int, f string, a ...interface{})
InfoWithLevel(l int, f string, a ...interface{})
WarnWithLevel(l int, f string, a ...interface{})
ErrWithLevel(l int, f string, a ...interface{})
ErrNoExitWithLevel(l int, f string, a ...interface{})
Printf(f string, a ...interface{})
}
// Object defines the logger.
type Object struct {
// Name is the package name. It is accessed in the format string by %pkg.
Name string
// DebugEnabled enables debug messages if true.
// It is true by default.
DebugEnabled bool
// InfoEnabled enables info messages if true.
// It is true by default.
InfoEnabled bool
// WarningEnabled enables warning messages if true.
// It is true by default.
WarningEnabled bool
// ErrorExitCode is the exit code to use for the Error function.
// The default is 1.
ErrorExitCode int
// Writers for the message output.
// If no writers are specified, messages go to os.Stdout.
Writers []io.Writer
// TimeFormat is the format of the prefix timestamp.
// See time.Format for details.
// The default format is: "2006-01-02 15:05:05.000 MST"
TimeFormat string
// Format is the template for the output. It has the following specifiers.
//
// %file is the caller file name
// %func is the function name
// %line is the line number
// %msg is the actual message
// %pkg is the package name
// %time is the time format in the current locale
// %utc is the time format in the UTC locale
// %type is the msg type: DEBUG, INFO, WARNING, ERROR
// %% is a single % character
//
// You can explicitly format each field by specifying the formatting
// options in parentheses.
//
// %(-28)time
//
// Any other text is left verbatim.
//
// The default format is.
// `%(-27)time %(-7)type %file %line - %msg`
Format string
// outputFormat created by NewMsg and used to generate a message.
outputFormat string
// outputFlds created by NewMsg and used to specify the fields.
outputFlds []string
}
// NewMsg makes a message object.
// n - package name
// f - format string, set to "" to get the default.
// t - time stamp format, set to "" to get the default
// w - the list of writers, if empty all messages go to stdout
func NewMsg(n string, f string, t string, w []io.Writer) (obj *Object, err error) {
obj = new(Object)
obj.Name = n
obj.DebugEnabled = true
obj.InfoEnabled = true
obj.WarningEnabled = true
obj.ErrorExitCode = 1
if len(w) == 0 {
obj.Writers = append(obj.Writers, os.Stdout)
} else {
obj.Writers = w
}
// Set the time format. If it is empty, set the default.
if t == "" {
obj.TimeFormat = "2006-01-02 15:05:05.000 MST"
} else {
obj.TimeFormat = t
}
// Set the format. If it is empty use the default.
if f == "" {
obj.Format = `%(-27)time %(-7)type %file %line - %msg`
} else {
obj.Format = f
}
// Parse the format.
ofmt, oflds, err := ParseFormatString(obj.Format)
obj.outputFormat = ofmt
obj.outputFlds = oflds
return
}
/*
Debug prints a debug message obtaining the callers filename, function and
line number.
It automatically appends a new line.
Example:
msg.Debug("%v = %v", key, value)
*/
func (o Object) Debug(f string, a ...interface{}) {
if o.DebugEnabled {
o.PrintMsg("DEBUG", 2, f, a...)
}
}
/*
DebugWithLevel prints a debug message obtaining the filename, function and
line number from the caller specified by level "l". l=2 is the same
as Debug().
It automatically appends a new line.
Example:
msg.DebugWithLevel(2, "%v = %v", key, value)
*/
func (o Object) DebugWithLevel(l int, f string, a ...interface{}) {
if o.DebugEnabled {
o.PrintMsg("DEBUG", l, f, a...)
}
}
/*
Info prints an info message obtaining the callers filename, function and
line number.
It automatically appends a new line.
Example:
msg.Info("%v = %v", key, value)
*/
func (o Object) Info(f string, a ...interface{}) {
if o.InfoEnabled {
o.PrintMsg("INFO", 2, f, a...)
}
}
/*
InfoWithLevel prints an info message obtaining the filename, function and
line number from the caller specified by level "l". l=2 is the same
as Debug().
It automatically appends a new line.
Example:
msg.InfoWithLevel(2, "%v = %v", key, value)
*/
func (o Object) InfoWithLevel(l int, f string, a ...interface{}) {
if o.InfoEnabled {
o.PrintMsg("INFO", l, f, a...)
}
}
/*
Warn prints a warning message obtaining the callers filename, function and
line number.
It automatically appends a new line.
Example:
msg.Warn("%v = %v", key, value)
*/
func (o Object) Warn(f string, a ...interface{}) {
if o.WarningEnabled {
o.PrintMsg("WARNING", 2, f, a...)
}
}
/*
WarnWithLevel prints a warning message obtaining the filename, function and
line number from the caller specified by level "l". l=2 is the same
as Debug().
It automatically appends a new line.
Example:
msg.WarnWithLevel(2, "%v = %v", key, value)
*/
func (o Object) WarnWithLevel(l int, f string, a ...interface{}) {
if o.WarningEnabled {
o.PrintMsg("WARNING", 2, f, a...)
}
}
/*
Err prints an error message obtaining the callers filename, function and
line number and exits. It cannot be disabled.
It automatically appends a new line.
Example:
msg.Err("%v = %v", key, value)
*/
func (o Object) Err(f string, a ...interface{}) {
o.PrintMsg("ERROR", 2, f, a...)
os.Exit(o.ErrorExitCode)
}
/*
ErrWithLevel prints an error message obtaining the filename, function and
line number from the caller specified by level "l". l=2 is the same
as Debug() and exits. It cannot be disabled.
It automatically appends a new line.
Example:
msg.ErrWithLevel(2, "%v = %v", key, value)
*/
func (o Object) ErrWithLevel(l int, f string, a ...interface{}) {
o.PrintMsg("ERROR", 2, f, a...)
os.Exit(o.ErrorExitCode)
}
/*
ErrNoExit prints an error message obtaining the callers filename, function and
line number. It does not exit and cannot be disabled.
It automatically appends a new line.
Example:
msg.ErrNoExit("%v = %v", key, value)
*/
func (o Object) ErrNoExit(f string, a ...interface{}) {
o.PrintMsg("ERROR", 2, f, a...)
}
/*
ErrNoExitWithLevel prints an error message obtaining the filename, function and
line number from the caller specified by level "l". l=2 is the same
as Debug(). It does not exit and cannot be disabled.
It automatically appends a new line.
Example:
msg.ErrNoExitWithLevel(2, "%v = %v", key, value)
*/
func (o Object) ErrNoExitWithLevel(l int, f string, a ...interface{}) {
o.PrintMsg("ERROR", 2, f, a...)
}
/*
Printf prints directly to the log without the format string.
It allows you to insert arbitrary text.
Unlike the other functions it does not automatically append a new line.
Example:
msg.Printf("this is just random text that goes to all writers\n")
*/
func (o Object) Printf(f string, a ...interface{}) |
/*
PrintMsg is the basis of all message printers except Printf. It prints the
formatted messages and normally would not be called directly.
t - is the type, normally one of DEBUG, INFO, WARNING or ERROR
l - is the caller level: 0 is this function, 1 is the caller, 2 is the callers caller and so on
f - format string
a - argument list
*/
func (o Object) PrintMsg(t string, l int, f string, a | {
// Create the formatted output string.
s := fmt.Sprintf(f, a...)
// Output it for each writer.
for _, w := range o.Writers {
fmt.Fprint(w, s)
}
} | identifier_body |
examples.js | if(i % 5 === 0) {
words += "Buzz";
} else {
words += i;
}
console.log(words);
}
/*****************WHILE LOOPS**********************/
// PRINT EVEN NUMBERS BETWEEN 10 AND 40
var count = 10;
while(count <= 40){
console.log(count);
count+=2;
}
// PRINT ALL ODD NUMBERS BETWEEN 300 333
var count = 300;
while(count <=333){
if(count % 2 !== 0){
console.log(count);
}
count += 1;
}
/*****************FUNCTIONS**********************/
// Replace - with _ within a string
function kebabToSnake(str) {
//replace all '-' with '_'
var newStr = str.replace(/-/g , "_");
//return str
return newStr;
}
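// Illustrative usage (added, not part of the original exercise): the /-/g regex
// carries the global flag, so every dash is replaced, e.g.
// kebabToSnake("font-size-large") would return "font_size_large".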
function wordsToSentence(words) {
//words is an array of strings
//return a string that is all of the words concatenated together
//spaces need to be between each word
//example: ['Hello', 'world!'] -> 'Hello world!'
return words.join(' ');
}
function combineNames(firstName, lastName) {
//return firstName and lastName combined as one string and separated by a space.
//'Lambda', 'School' -> 'Lambda School'
var fullName = firstName + ' ' + lastName;
return fullName;
}
function getGreeting(name) {
//Take the name string and concatenate other strings onto it so it takes the following form:
//'Sam' -> 'Hello Sam!'
var greeting = 'Hello ' + name + '!';
return greeting;
}
function getRectangleArea(length, width) {
//return the area of the rectangle by using length and width
var area = length * width;
return area;
}
function getRectangularPrismVolume(length, width, height) {
//return the volume of the 3D rectangular prism given the length, width, and height
var prismed = length * width * height;
return prismed;
}
/*****************REVERSE A STRING **********************/
// This splits the string, reverses it and joins it again
function FirstReverse(str) {
// code goes here
var splitString = str.split("");
var reverseArray = splitString.reverse();
var joinArray = reverseArray.join("");
console.log(joinArray);
}
// keep this function call here
FirstReverse("hello");
/*****************CHECK NUMBER IS PRIME **********************/
function primeChecker (num) {
if (num < 2) return false;
var newNum = Math.floor(Math.sqrt(num));
for (var i = 2; i <= newNum; i++)
{
if (num % i === 0)
{
return false;
}
}
return true;
}
console.log(primeChecker(15));
/*****************FUNCTIONS + IF ELSE**********************/
function fizzBuzz(num) {
//if num is divisible by 3 return 'fizz'
//if num is divisible by 5 return 'buzz'
//if num is divisible by 3 & 5 return 'fizzbuzz'
//otherwise return num
if(num % 3 === 0 && num % 5 === 0) {
return 'fizzbuzz';
} else if(num % 3 === 0) {
return 'fizz';
}else if(num % 5 === 0) {
return 'buzz';
} else {
return num;
}
}
function colorOf(r,g,b){
var red = r.toString(16);
if (red.length === 1) red = "0" + red;
var green = g.toString(16);
if (green.length === 1) green = "0" + green;
var blue = b.toString(16);
if (blue.length === 1) blue = "0" + blue;
return "#" + red + green + blue;
}
colorOf(255,0,0); // outputs '#ff0000'
// turns numbers to a hex code string
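// Worked example (added for clarity): colorOf(0, 128, 255) builds "00" (0 -> "0",
// then zero-padded), "80" (128) and "ff" (255), so it would return "#0080ff".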
function isPrime(num) {
//return true if num is prime.
//otherwise return false
//hint: a prime number is only evenly divisible by itself and 1
//hint2: you can solve this using a for loop
//note: 0 and 1 are NOT considered prime numbers
for(var i = 2; i < num; i++) {
if(num % i === 0) {
return false;
}
}
return num > 1;
}
function isPrime(num) {
//return true if num is prime.
//otherwise return false
//hint: a prime number is only evenly divisible by itself and 1
//hint2: you can solve this using a for loop
//note: 0 and 1 are NOT considered prime numbers
if(num < 2) return false;
for (var i = 2; i < num; i++) {
if(num%i==0)
return false;
}
return true;
}
function personAge(yearOfBirth) {
var age = 2017 - yearOfBirth;
if(age >= 16) {
console.log("you can drive");
} else {
console.log("you are not old enough to drive");
}
}
personAge(2011);
//find the middle of a random odd or even string
function mid(str) {
var middle = Math.floor(str.length / 2);
if(str.length % 2 === 0){
return str[middle - 1] + str[middle];
} else {
return str[middle];
}
}
console.log(mid('computer'));
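// Worked example (added for clarity): 'computer' has length 8 (even), so middle is
// Math.floor(8 / 2) = 4 and the call returns str[3] + str[4], i.e. "pu". An
// odd-length word such as 'cat' would return just the single middle letter "a".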
//access array items
function uefaEuro2016(teams, scores){
// your code...
if(scores[0] === scores[1]) {
return 'At match ' + teams[0] + ' - ' + teams[1] + ', teams played draw.';
} else if(scores[0] > scores[1]) {
return 'At match ' + teams[0] + ' - ' + teams[1] + ', ' + teams[0] + ' won!';
} else if(scores[1] > scores[0]) {
return 'At match ' + teams[0] + ' - ' + teams[1] + ', ' + teams[1] + ' won!';
}
}
uefaEuro2016(['Germany', 'Ukraine'], [2, 0]);
/*****************FUNCTIONS + FOR LOOP**********************/
function averageTestScore(testScores) {
//testScores is an array. Iterate over testScores and compute the average.
//return the average
var average = 0;
for (var i = 0; i < testScores.length; i++) {
average = average + testScores[i];
}
average = average / testScores.length;
return average;
}
// LOOP OVER ARRAY, PUSH ELEMENT INTO EMPTY ARRAY
function pickIt(arr){
var odd=[],even=[];
for (var i = 0; i < arr.length; i++){
if (arr[i] % 2 === 1) {
odd.push(arr[i]);
} else {
even.push(arr[i]);
}
}
return [odd,even];
}
pickIt([6, 9]);
// LOOP IN A LOOP :O
console.log('******************************');
var twoDimensionArray = [
['Amul', 'Amanda', 'Harriet'],
['Anisa', 'Mauro', 'Matty'],
['Jonnie', 'Emily', 'Nick']
];
for( var i = 0; i < twoDimensionArray.length; i++ ){
var namesArr = twoDimensionArray[i];
for( var x = 0; x < namesArr.length; x++ ){
console.log( twoDimensionArray[i][x] + ' is in array ' + i +
' in the twoDimensional array and has index ' + x + ' in its own array');
}
}
//function with a for loop, if statement, break and continue
function grabDoll(dolls){
var bag=[];
for(var i = 0; i < dolls.length; i++) {
if(dolls[i] === "Hello Kitty" || dolls[i] === "Barbie doll")
bag.push(dolls[i]);
else
continue;
if(bag.length === 3) break;
}
return bag;
}
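// Hypothetical call (added for illustration):
// grabDoll(["Hello Kitty", "bear", "Barbie doll", "Hello Kitty", "Hello Kitty"])
// skips "bear" via continue and stops via break once the bag holds three items,
// returning ["Hello Kitty", "Barbie doll", "Hello Kitty"].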
/*****************FUNCTIONS + IF ELSE + FOR **********************/
function largestNumber(numbers) {
//numbers is an array of integers
//return the largest integer
var largestNum = 0;
for (var i = 0; i < numbers.length; i++) {
if (numbers[i] > largestNum) {
largestNum = numbers[i];
}
}
return largestNum;
}
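// Note (added): because largestNum starts at 0, an all-negative input such as
// largestNumber([-5, -2, -9]) would return 0 rather than -2; seeding the loop
// with numbers[0] would avoid that edge case.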
/*****************FUNCTIONS + OBJECTS**********************/
function makeCat(name, age) | {
//create a new object with a name property with the value set to the name argument
//add an age property to the object with the value set to the age argument
//add a method called meow that returns the string 'Meow!'
//return the object
var newCat = {
name: name,
age: age,
meow:function () {
return 'Meow!';
}
};
return newCat;
} | identifier_body |
|
examples.js | numbers between 300 and 333
for(var i = 300; i <= 333; i++) {
if(i % 2 !== 0) {
console.log(i);
}
}
// fizz buzz question
// print 1 to 100 any num / by 3 print fizz, / 5 buzz and both fizzbuzz
for (var i = 1; i <= 100; i++) {
var words = "";
if (i % 3 === 0 && i % 5 === 0) {
words += "FizzBuzz";
} else if (i % 3 === 0) {
words += "Fizz";
} else if(i % 5 === 0) {
words += "Buzz";
} else {
words += i;
}
console.log(words);
}
/*****************WHILE LOOPS**********************/
// PRINT EVEN NUMBERS BETWEEN 10 AND 40
var count = 10;
while(count <= 40){
console.log(count);
count+=2;
}
// PRINT ALL ODD NUMBERS BETWEEN 300 333
var count = 300;
while(count <=333){
if(count % 2 !== 0){
console.log(count);
}
count += 1;
}
/*****************FUNCTIONS**********************/
// Replace - with _ within a string
function kebabToSnake(str) {
//replace all '-' with '_'
var newStr = str.replace(/-/g , "_");
//return str
return newStr;
}
function wordsToSentence(words) {
//words is an array of strings
//return a string that is all of the words concatenated together
//spaces need to be between each word
//example: ['Hello', 'world!'] -> 'Hello world!'
return words.join(' ');
}
function combineNames(firstName, lastName) {
//return firstName and lastName combined as one string and separated by a space.
//'Lambda', 'School' -> 'Lambda School'
var fullName = firstName + ' ' + lastName;
return fullName;
}
function getGreeting(name) {
//Take the name string and concatenate other strings onto it so it takes the following form:
//'Sam' -> 'Hello Sam!'
var greeting = 'Hello ' + name + '!';
return greeting;
}
function getRectangleArea(length, width) {
//return the area of the rectangle by using length and width
var area = length * width;
return area;
}
function getRectangularPrismVolume(length, width, height) {
//return the volume of the 3D rectangular prism given the length, width, and height
var prismed = length * width * height;
return prismed;
}
/*****************REVERSE A STRING **********************/
// This splits the string, reverses it and joins it again
function FirstReverse(str) {
// code goes here
var splitString = str.split("");
var reverseArray = splitString.reverse();
var joinArray = reverseArray.join("");
console.log(joinArray);
}
// keep this function call here
FirstReverse("hello");
/*****************CHECK NUMBER IS PRIME **********************/
function primeChecker (num) {
if (num < 2) return false;
var newNum = Math.floor(Math.sqrt(num));
for (var i = 2; i <= newNum; i++)
{
if (num % i === 0)
{
return false;
}
}
return true;
}
console.log(primeChecker(15));
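// Worked example (added for clarity): for 15, Math.sqrt(15) is about 3.87 so newNum
// is 3; the loop tests i = 2 (15 % 2 === 1) and i = 3 (15 % 3 === 0), so the
// function returns false and the console.log above prints false.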
/*****************FUNCTIONS + IF ELSE**********************/
function fizzBuzz(num) {
//if num is divisible by 3 return 'fizz'
//if num is divisible by 5 return 'buzz'
//if num is divisible by 3 & 5 return 'fizzbuzz'
//otherwise return num
if(num % 3 === 0 && num % 5 === 0) {
return 'fizzbuzz';
} else if(num % 3 === 0) {
return 'fizz';
}else if(num % 5 === 0) {
return 'buzz';
} else { | function colorOf(r,g,b){
var red = r.toString(16);
if (red.length === 1) red = "0" + red;
var green = g.toString(16);
if (green.length === 1) green = "0" + green;
var blue = b.toString(16);
if (blue.length === 1) blue = "0" + blue;
return "#" + red + green + blue;
}
colorOf(255,0,0); // outputs '#ff0000'
// turns numbers to a hex code string
function isPrime(num) {
//return true if num is prime.
//otherwise return false
//hint: a prime number is only evenly divisible by itself and 1
//hint2: you can solve this using a for loop
//note: 0 and 1 are NOT considered prime numbers
for(var i = 2; i < num; i++) {
if(num % i === 0) {
return false;
}
}
return num > 1;
}
function isPrime(num) {
//return true if num is prime.
//otherwise return false
//hint: a prime number is only evenly divisible by itself and 1
//hint2: you can solve this using a for loop
//note: 0 and 1 are NOT considered prime numbers
if(num < 2) return false;
for (var i = 2; i < num; i++) {
if(num%i==0)
return false;
}
return true;
}
function personAge(yearOfBirth) {
var age = 2017 - yearOfBirth;
if(age >= 16) {
console.log("you can drive");
} else {
console.log("you are not old enough to drive");
}
}
personAge(2011);
//find the middle of a random odd or even string
function mid(str) {
var middle = Math.floor(str.length / 2);
if(str.length % 2 === 0){
return str[middle - 1] + str[middle];
} else {
return str[middle];
}
}
console.log(mid('computer'));
//access array items
function uefaEuro2016(teams, scores){
// your code...
if(scores[0] === scores[1]) {
return 'At match ' + teams[0] + ' - ' + teams[1] + ', teams played draw.';
} else if(scores[0] > scores[1]) {
return 'At match ' + teams[0] + ' - ' + teams[1] + ', ' + teams[0] + ' won!';
} else if(scores[1] > scores[0]) {
return 'At match ' + teams[0] + ' - ' + teams[1] + ', ' + teams[1] + ' won!';
}
}
uefaEuro2016(['Germany', 'Ukraine'], [2, 0]);
/*****************FUNCTIONS + FOR LOOP**********************/
function averageTestScore(testScores) {
//testScores is an array. Iterate over testScores and compute the average.
//return the average
var average = 0;
for (var i = 0; i < testScores.length; i++) {
average = average + testScores[i];
}
average = average / testScores.length;
return average;
}
// LOOP OVER ARRAY, PUSH ELEMENT INTO EMPTY ARRAY
function pickIt(arr){
var odd=[],even=[];
for (var i = 0; i < arr.length; i++){
if (arr[i] % 2 === 1) {
odd.push(arr[i]);
} else {
even.push(arr[i]);
}
}
return [odd,even];
}
pickIt([6, 9]);
// LOOP IN A LOOP :O
console.log('******************************');
var twoDimensionArray = [
['Amul', 'Amanda', 'Harriet'],
['Anisa', 'Mauro', 'Matty'],
['Jonnie', 'Emily', 'Nick']
];
for( var i = 0; i < twoDimensionArray.length; i++ ){
var namesArr = twoDimensionArray[i];
for( var x = 0; x < namesArr.length; x++ ){
console.log( twoDimensionArray[i][x] + ' is in array ' + i +
' in the twoDimensional array and has index ' + x + ' in its own array');
}
}
//function with a for loop, if statement, break and continue
function grabDoll(dolls){
var bag=[];
for(var i = 0; i < dolls.length; i++) {
if(dolls[i] === "Hello Kitty" || dolls[i] === "Barbie doll")
bag.push(dolls[i]);
else
continue;
if(bag.length === 3) break;
}
return bag;
}
/*****************FUNCTIONS + IF ELSE + FOR **********************/
function largestNumber(numbers) {
//numbers is an array of integers
//return the largest integer
var largestNum = 0;
for (var i = | return num;
}
}
| random_line_split |
examples.js | (n) {
if (n === 0) {
return 1;
}
// This is it! Recursion!!
return n * factorial(n - 1);
}
console.log(factorial(10));
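// Worked example (added for clarity): factorial(3) expands to 3 * factorial(2),
// then 3 * 2 * factorial(1), then 3 * 2 * 1 * factorial(0); the base case returns 1,
// giving 6. The call above, factorial(10), evaluates to 3628800.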
/*****************FOR LOOPS**********************/
// print all odd numbers between 300 and 333
for(var i = 300; i <= 333; i++) {
if(i % 2 !== 0) {
console.log(i);
}
}
// fizz buzz question
// print 1 to 100 any num / by 3 print fizz, / 5 buzz and both fizzbuzz
for (var i = 1; i <= 100; i++) {
var words = "";
if (i % 3 === 0 && i % 5 === 0) {
words += "FizzBuzz";
} else if (i % 3 === 0) {
words += "Fizz";
} else if(i % 5 === 0) {
words += "Buzz";
} else {
words += i;
}
console.log(words);
}
/*****************WHILE LOOPS**********************/
// PRINT EVEN NUMBERS BETWEEN 10 AND 40
var count = 10;
while(count <= 40){
console.log(count);
count+=2;
}
// PRINT ALL ODD NUMBERS BETWEEN 300 333
var count = 300;
while(count <=333){
if(count % 2 !== 0){
console.log(count);
}
count += 1;
}
/*****************FUNCTIONS**********************/
// Replace - with _ within a string
function kebabToSnake(str) {
//replace all '-' with '_'
var newStr = str.replace(/-/g , "_");
//return str
return newStr;
}
function wordsToSentence(words) {
//words is an array of strings
//return a string that is all of the words concatenated together
//spaces need to be between each word
//example: ['Hello', 'world!'] -> 'Hello world!'
return words.join(' ');
}
function combineNames(firstName, lastName) {
//return firstName and lastName combined as one string and separated by a space.
//'Lambda', 'School' -> 'Lambda School'
var fullName = firstName + ' ' + lastName;
return fullName;
}
function getGreeting(name) {
//Take the name string and concatenate other strings onto it so it takes the following form:
//'Sam' -> 'Hello Sam!'
var greeting = 'Hello ' + name + '!';
return greeting;
}
function getRectangleArea(length, width) {
//return the area of the rectangle by using length and width
var area = length * width;
return area;
}
function getRectangularPrismVolume(length, width, height) {
//return the volume of the 3D rectangular prism given the length, width, and height
var prismed = length * width * height;
return prismed;
}
/*****************REVERSE A STRING **********************/
// This splits the string, reverses it and joins it again
function FirstReverse(str) {
// code goes here
var splitString = str.split("");
var reverseArray = splitString.reverse();
var joinArray = reverseArray.join("");
console.log(joinArray);
}
// keep this function call here
FirstReverse("hello");
/*****************CHECK NUMBER IS PRIME **********************/
function primeChecker (num) {
if (num < 2) return false;
var newNum = Math.floor(Math.sqrt(num));
for (var i = 2; i <= newNum; i++)
{
if (num % i === 0)
{
return false;
}
}
return true;
}
console.log(primeChecker(15));
/*****************FUNCTIONS + IF ELSE**********************/
function fizzBuzz(num) {
//if num is divisible by 3 return 'fizz'
//if num is divisible by 5 return 'buzz'
//if num is divisible by 3 & 5 return 'fizzbuzz'
//otherwise return num
if(num % 3 === 0 && num % 5 === 0) {
return 'fizzbuzz';
} else if(num % 3 === 0) {
return 'fizz';
}else if(num % 5 === 0) {
return 'buzz';
} else {
return num;
}
}
function colorOf(r,g,b){
var red = r.toString(16);
if (red.length === 1) red = "0" + red;
var green = g.toString(16);
if (green.length === 1) green = "0" + green;
var blue = b.toString(16);
if (blue.length === 1) blue = "0" + blue;
return "#" + red + green + blue;
}
colorOf(255,0,0); // outputs '#ff0000'
// turns numbers to a hex code string
function isPrime(num) {
//return true if num is prime.
//otherwise return false
//hint: a prime number is only evenly divisible by itself and 1
//hint2: you can solve this using a for loop
//note: 0 and 1 are NOT considered prime numbers
for(var i = 2; i < num; i++) {
if(num % i === 0) {
return false;
}
}
return num > 1;
}
function isPrime(num) {
//return true if num is prime.
//otherwise return false
//hint: a prime number is only evenly divisible by itself and 1
//hint2: you can solve this using a for loop
//note: 0 and 1 are NOT considered prime numbers
if(num < 2) return false;
for (var i = 2; i < num; i++) {
if(num%i==0)
return false;
}
return true;
}
function personAge(yearOfBirth) {
var age = 2017 - yearOfBirth;
if(age >= 16) {
console.log("you can drive");
} else {
console.log("you are not old enough to drive");
}
}
personAge(2011);
//find the middle of a random odd or even string
function mid(str) {
var middle = Math.floor(str.length / 2);
if(str.length % 2 === 0){
return str[middle - 1] + str[middle];
} else {
return str[middle];
}
}
console.log(mid('computer'));
//access array items
function uefaEuro2016(teams, scores){
// your code...
if(scores[0] === scores[1]) {
return 'At match ' + teams[0] + ' - ' + teams[1] + ', teams played draw.';
} else if(scores[0] > scores[1]) {
return 'At match ' + teams[0] + ' - ' + teams[1] + ', ' + teams[0] + ' won!';
} else if(scores[1] > scores[0]) {
return 'At match ' + teams[0] + ' - ' + teams[1] + ', ' + teams[1] + ' won!';
}
}
uefaEuro2016(['Germany', 'Ukraine'], [2, 0]);
/*****************FUNCTIONS + FOR LOOP**********************/
function averageTestScore(testScores) {
//testScores is an array. Iterate over testScores and compute the average.
//return the average
var average = 0;
for (var i = 0; i < testScores.length; i++) {
average = average + testScores[i];
}
average = average / testScores.length;
return average;
}
// LOOP OVER ARRAY, PUSH ELEMENT INTO EMPTY ARRAY
function pickIt(arr){
var odd=[],even=[];
for (var i = 0; i < arr.length; i++){
if (arr[i] % 2 === 1) {
odd.push(arr[i]);
} else {
even.push(arr[i]);
}
}
return [odd,even];
}
pickIt([6, 9]);
// LOOP IN A LOOP :O
console.log('******************************');
var twoDimensionArray = [
['Amul', 'Amanda', 'Harriet'],
['Anisa', 'Mauro', 'Matty'],
['Jonnie', 'Emily', 'Nick']
];
for( var i = 0; i < twoDimensionArray.length; i++ ){
var namesArr = twoDimensionArray[i];
for( var x = 0; x < namesArr.length; x++ ){
console.log( twoDimensionArray[i][x] + ' is in array ' + i +
' in the twoDimensional array and has index ' + x + ' in its own array');
}
}
//function with a for loop, if statement, break and continue
function grabDoll(dolls){
var bag=[];
for(var i = 0; i < dolls.length; i++) {
if(dolls[i] === "Hello Kitty" || dolls[i] === "Barbie doll")
bag.push(dolls[i]);
else
continue;
if(b | factorial | identifier_name |
|
examples.js | between 300 and 333
for(var i = 300; i <= 333; i++) {
if(i % 2 !== 0) {
console.log(i);
}
}
// fizz buzz question
// print 1 to 100 any num / by 3 print fizz, / 5 buzz and both fizzbuzz
for (var i = 1; i <= 100; i++) {
var words = "";
if (i % 3 === 0 && i % 5 === 0) {
words += "FizzBuzz";
} else if (i % 3 === 0) {
words += "Fizz";
} else if(i % 5 === 0) {
words += "Buzz";
} else {
words += i;
}
console.log(words);
}
/*****************WHILE LOOPS**********************/
// PRINT EVEN NUMBERS BETWEEN 10 AND 40
var count = 10;
while(count <= 40){
console.log(count);
count+=2;
}
// PRINT ALL ODD NUMBERS BETWEEN 300 333
var count = 300;
while(count <=333){
if(count % 2 !== 0){
console.log(count);
}
count += 1;
}
/*****************FUNCTIONS**********************/
// Replace - with _ within a string
function kebabToSnake(str) {
//replace all '-' with '_'
var newStr = str.replace(/-/g , "_");
//return str
return newStr;
}
function wordsToSentence(words) {
//words is an array of strings
//return a string that is all of the words concatenated together
//spaces need to be between each word
//example: ['Hello', 'world!'] -> 'Hello world!'
return words.join(' ');
}
function combineNames(firstName, lastName) {
//return firstName and lastName combined as one string and separated by a space.
//'Lambda', 'School' -> 'Lambda School'
var fullName = firstName + ' ' + lastName;
return fullName;
}
function getGreeting(name) {
//Take the name string and concatenate other strings onto it so it takes the following form:
//'Sam' -> 'Hello Sam!'
var greeting = 'Hello ' + name + '!';
return greeting;
}
function getRectangleArea(length, width) {
//return the area of the rectangle by using length and width
var area = length * width;
return area;
}
function getRectangularPrismVolume(length, width, height) {
//return the volume of the 3D rectangular prism given the length, width, and height
var prismed = length * width * height;
return prismed;
}
/*****************REVERSE A STRING **********************/
// This splits the string, reverses it and joins it again
function FirstReverse(str) {
// code goes here
var splitString = str.split("");
var reverseArray = splitString.reverse();
var joinArray = reverseArray.join("");
console.log(joinArray);
}
// keep this function call here
FirstReverse("hello");
/*****************CHECK NUMBER IS PRIME **********************/
function primeChecker (num) {
if (num < 2) return false;
var newNum = Math.floor(Math.sqrt(num));
for (var i = 2; i <= newNum; i++)
{
if (num % i === 0)
{
return false;
}
}
return true;
}
console.log(primeChecker(15));
/*****************FUNCTIONS + IF ELSE**********************/
function fizzBuzz(num) {
//if num is divisible by 3 return 'fizz'
//if num is divisible by 5 return 'buzz'
//if num is divisible by 3 & 5 return 'fizzbuzz'
//otherwise return num
if(num % 3 === 0 && num % 5 === 0) {
return 'fizzbuzz';
} else if(num % 3 === 0) {
return 'fizz';
}else if(num % 5 === 0) {
return 'buzz';
} else {
return num;
}
}
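// Sample outputs (added for illustration): fizzBuzz(9) -> 'fizz', fizzBuzz(10) -> 'buzz',
// fizzBuzz(30) -> 'fizzbuzz', fizzBuzz(7) -> 7.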
function colorOf(r,g,b){
var red = r.toString(16);
if (red.length === 1) red = "0" + red;
var green = g.toString(16);
if (green.length === 1) green = "0" + green;
var blue = b.toString(16);
if (blue.length === 1) blue = "0" + blue;
return "#" + red + green + blue;
}
colorOf(255,0,0); // outputs '#ff0000'
// turns numbers to a hex code string
function isPrime(num) {
//return true if num is prime.
//otherwise return false
//hint: a prime number is only evenly divisible by itself and 1
//hint2: you can solve this using a for loop
//note: 0 and 1 are NOT considered prime numbers
for(var i = 2; i < num; i++) {
if(num % i === 0) |
}
return num > 1;
}
function isPrime(num) {
//return true if num is prime.
//otherwise return false
//hint: a prime number is only evenly divisible by itself and 1
//hint2: you can solve this using a for loop
//note: 0 and 1 are NOT considered prime numbers
if(num < 2) return false;
for (var i = 2; i < num; i++) {
if(num%i==0)
return false;
}
return true;
}
function personAge(yearOfBirth) {
var age = 2017 - yearOfBirth;
if(age >= 16) {
console.log("you can drive");
} else {
console.log("you are not old enough to drive");
}
}
personAge(2011);
//find the middle of a random odd or even string
function mid(str) {
var middle = Math.floor(str.length / 2);
if(str.length % 2 === 0){
return str[middle - 1] + str[middle];
} else {
return str[middle];
}
}
console.log(mid('computer'));
//access array items
function uefaEuro2016(teams, scores){
// your code...
if(scores[0] === scores[1]) {
return 'At match ' + teams[0] + ' - ' + teams[1] + ', teams played draw.';
} else if(scores[0] > scores[1]) {
return 'At match ' + teams[0] + ' - ' + teams[1] + ', ' + teams[0] + ' won!';
} else if(scores[1] > scores[0]) {
return 'At match ' + teams[0] + ' - ' + teams[1] + ', ' + teams[1] + ' won!';
}
}
uefaEuro2016(['Germany', 'Ukraine'], [2, 0]);
/*****************FUNCTIONS + FOR LOOP**********************/
function averageTestScore(testScores) {
//testScores is an array. Iterate over testScores and compute the average.
//return the average
var average = 0;
for (var i = 0; i < testScores.length; i++) {
average = average + testScores[i];
}
average = average / testScores.length;
return average;
}
// LOOP OVER ARRAY, PUSH ELEMENT INTO EMPTY ARRAY
function pickIt(arr){
var odd=[],even=[];
for (var i = 0; i < arr.length; i++){
if (arr[i] % 2 === 1) {
odd.push(arr[i]);
} else {
even.push(arr[i]);
}
}
return [odd,even];
}
pickIt([6, 9]);
// LOOP IN A LOOP :O
console.log('******************************');
var twoDimensionArray = [
['Amul', 'Amanda', 'Harriet'],
['Anisa', 'Mauro', 'Matty'],
['Jonnie', 'Emily', 'Nick']
];
for( var i = 0; i < twoDimensionArray.length; i++ ){
var namesArr = twoDimensionArray[i];
for( var x = 0; x < namesArr.length; x++ ){
console.log( twoDimensionArray[i][x] + ' is in array ' + i +
' in the twoDimensional array and has index ' + x + ' in its own array');
}
}
//function with a for loop, if statement, break and continue
function grabDoll(dolls){
var bag=[];
for(var i = 0; i < dolls.length; i++) {
if(dolls[i] === "Hello Kitty" || dolls[i] === "Barbie doll")
bag.push(dolls[i]);
else
continue;
if(bag.length === 3) break;
}
return bag;
}
/*****************FUNCTIONS + IF ELSE + FOR **********************/
function largestNumber(numbers) {
//numbers is an array of integers
//return the largest integer
var largestNum = 0;
for (var i | {
return false;
} | conditional_block |
header.go | avoid an extra copy or using a
// ByteReader, which would just have an internal buffer and be slower.
r := unsafe.Slice((*int8)(unsafe.Pointer(&b[0])), len(b))
return r, nil
case TypeChar: // Char and Bin are different because they're offset differently.
r := make([]byte, int(e.count))
if _, err := sr.ReadAt(r, 0); err != nil {
return nil, fmt.Errorf("rpm: header: error reading char: %w", err)
}
return r, nil
}
panic("unreachable")
default:
}
return nil, fmt.Errorf("unknown type: %v", e.Type)
}
// splitCString is a [bufio.SplitFunc] that splits at NUL, much like strings(1).
func splitCString(data []byte, atEOF bool) (advance int, token []byte, err error) {
if atEOF && len(data) == 0 {
return 0, nil, nil
}
if i := bytes.IndexByte(data, '\x00'); i >= 0 {
return i + 1, data[0:i], nil
}
if atEOF {
return len(data), data, nil
}
return 0, nil, nil
}
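// Editor's note (illustrative): with a bufio.Scanner configured via
// sc.Split(splitCString), an input of "lib\x00usr\x00" yields the tokens "lib"
// and "usr"; the terminating NUL bytes are consumed but never returned.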
func (h *Header) loadArenas(ctx context.Context, r io.ReaderAt) error {
const (
headerSz = 8
tagsMax = 0x0000ffff
dataMax = 0x0fffffff
sizeMax = 256 * 1024 * 1024
)
b := make([]byte, headerSz)
if _, err := r.ReadAt(b, 0); err != nil {
return fmt.Errorf("header: failed to read: %w", err)
}
tagsCt := binary.BigEndian.Uint32(b[0:])
dataSz := binary.BigEndian.Uint32(b[4:])
if tagsCt > tagsMax {
return fmt.Errorf("header botch: number of tags (%d) out of range", tagsCt)
}
if dataSz > dataMax {
return fmt.Errorf("header botch: data length (%d) out of range", dataSz)
}
tagsSz := int64(tagsCt) * entryInfoSize
// Sanity check, if possible:
var inSz int64
switch v := r.(type) {
case interface{ Size() int64 }:
// Check for Size method. [ioSectionReader]s and [byte.Buffer]s have these.
inSz = v.Size()
case io.Seeker:
// Seek if present.
var err error
inSz, err = v.Seek(0, io.SeekEnd)
if err != nil {
return err
}
default:
// Do a read for the end of the segment.
end := preambleSize + tagsSz + int64(dataSz)
if _, err := r.ReadAt(b, end-int64(len(b))); err != nil {
return err
}
inSz = end
}
if sz := preambleSize + tagsSz + int64(dataSz); sz >= sizeMax || sz != inSz {
return fmt.Errorf("not enough data")
}
if tagsCt == 0 {
return fmt.Errorf("no tags")
}
h.tags = io.NewSectionReader(r, headerSz, tagsSz)
h.data = io.NewSectionReader(r, headerSz+tagsSz, int64(dataSz))
h.Infos = make([]EntryInfo, tagsCt)
return nil
}
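// Layout sketch (added for clarity, derived from the reads above): the section
// handed to loadArenas starts with an 8-byte count header, followed by the tag
// entries and then the data blob the entries' offsets point into:
//
//	[0:4]                       big-endian uint32, number of tag entries
//	[4:8]                       big-endian uint32, size of the data blob in bytes
//	[8 : 8+count*entryInfoSize] the serialized EntryInfo records
//	[8+count*entryInfoSize : ]  the data blob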
// ErrNoRegion is a signal back from verifyRegion that the first tag is not one
// of the expected ones.
//
// This being reported means that the region verification has been
// short-circuited.
var errNoRegion = errors.New("no initial region tag, this is probably a bdb database")
func (h *Header) verifyRegion(ctx context.Context) error {
const regionTagCount = 16
region, err := h.loadTag(ctx, 0)
if err != nil {
return err
}
switch region.Tag {
case TagHeaderSignatures:
case TagHeaderImmutable:
case TagHeaderImage:
default:
return fmt.Errorf("region tag not found, got %v: %w", region.Tag, errNoRegion)
}
if region.Type != TypeBin || region.count != regionTagCount {
return fmt.Errorf("nonsense region tag: %v, count: %d", region.Type, region.count)
}
if off := region.offset + regionTagCount; off < 0 || off > int32(h.data.Size()) {
return fmt.Errorf("nonsense region offset")
}
var trailer EntryInfo
b := make([]byte, entryInfoSize)
if _, err := h.data.ReadAt(b, int64(region.offset)); err != nil {
return err
}
if err := trailer.UnmarshalBinary(b); err != nil {
return err
}
rDataLen := region.offset + regionTagCount
trailer.offset = -trailer.offset // trailer offset is negative and special
rIdxLen := trailer.offset / entryInfoSize
// Fixup copied out of librpm:
if region.Tag == TagHeaderSignatures && trailer.Tag == TagHeaderImage {
trailer.Tag = TagHeaderSignatures
}
if trailer.Tag != region.Tag || trailer.Type != TypeRegionTag || trailer.count != regionTagCount {
return fmt.Errorf("bad region trailer: %v", trailer)
}
if (trailer.offset%entryInfoSize != 0) ||
int64(rIdxLen) > h.tags.Size() ||
int64(rDataLen) > h.data.Size() {
return fmt.Errorf("region %d size incorrect: ril %d il %d rdl %d dl %d",
region.Tag, rIdxLen, h.tags.Size(), rDataLen, h.data.Size())
}
h.region = region.Tag
return nil
}
// verifyInfo verifies the "info" segments in the header.
//
// Experimentally, bdb databases aren't always sorted the expected way. The
// passed boolean controls whether this method uses lax verification or not.
func (h *Header) verifyInfo(ctx context.Context, isBDB bool) error {
lim := len(h.Infos)
typecheck := h.region == TagHeaderImmutable || h.region == TagHeaderImage
var prev int32
start := 1
if isBDB {
start--
}
for i := start; i < lim; i++ {
e, err := h.loadTag(ctx, i)
if err != nil {
return err
}
switch {
case prev > e.offset:
return fmt.Errorf("botched entry: prev > offset (%d > %d)", prev, e.offset)
case e.Tag < TagHeaderI18nTable && !isBDB:
return fmt.Errorf("botched entry: bad tag %v (%[1]d < %d)", e.Tag, TagHeaderI18nTable)
case e.Type < TypeMin || e.Type > TypeMax:
return fmt.Errorf("botched entry: bad type %v", e.Type)
case e.count == 0 || int64(e.count) > h.data.Size():
return fmt.Errorf("botched entry: bad count %d", e.count)
case (e.Type.alignment()-1)&e.offset != 0:
return fmt.Errorf("botched entry: weird alignment: type alignment %d, offset %d", e.Type.alignment(), e.offset)
case e.offset < 0 || int64(e.offset) > h.data.Size():
return fmt.Errorf("botched entry: bad offset %d", e.offset)
case typecheck && !checkTagType(e.Tag, e.Type):
return fmt.Errorf("botched entry: typecheck fail: %v is not %v", e.Tag, e.Type)
}
}
return nil
}
func checkTagType(key Tag, typ Kind) bool {
if i, ok := tagByValue[key]; ok {
t := tagTable[i].Type
// Check the type. Some versions of string are typed incorrectly in a
// compatible way.
return t == typ || t.class() == typ.class()
}
// Unknown tags get a pass.
return true
}
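// Hypothetical example (added for illustration): if tagTable records a tag as a
// plain string but a package encodes it as an i18n string, the class comparison
// above still accepts it, because both kinds fall in the string class.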
func (h *Header) loadTag(ctx context.Context, i int) (*EntryInfo, error) {
e := &h.Infos[i]
if e.Tag == Tag(0) {
b := make([]byte, entryInfoSize)
if _, err := h.tags.ReadAt(b, int64(i)*entryInfoSize); err != nil {
return nil, fmt.Errorf("header: error reading EntryInfo: %w", err)
}
if err := e.UnmarshalBinary(b); err != nil {
return nil, fmt.Errorf("header: martian EntryInfo: %w", err)
}
}
return e, nil
}
// EntryInfo describes an entry for the given Tag.
type EntryInfo struct {
Tag Tag
Type Kind
offset int32
count uint32
}
func (e *EntryInfo) String() string {
return fmt.Sprintf("tag %v type %v offset %d count %d", e.Tag, e.Type, e.offset, e.count)
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler.
func (e *EntryInfo) | UnmarshalBinary | identifier_name |
|
header.go | header: error reading %T: %w", r[0], err)
}
r[i] = int32(binary.BigEndian.Uint32(b))
}
return r, nil
case TypeInt16:
r := make([]int16, int(e.count))
b := make([]byte, 2)
for i := range r {
if _, err := io.ReadFull(sr, b); err != nil {
return nil, fmt.Errorf("rpm: header: error reading %T: %w", r[0], err)
}
r[i] = int16(binary.BigEndian.Uint16(b))
}
return r, nil
case TypeInt8:
b := make([]byte, int(e.count))
if _, err := io.ReadFull(sr, b); err != nil {
return nil, fmt.Errorf("rpm: header: error reading int8: %w", err)
}
// Despite byte == uint8 and uint8 being convertible to int8, this is
// the only way I can figure out to avoid an extra copy or using a
// ByteReader, which would just have an internal buffer and be slower.
r := unsafe.Slice((*int8)(unsafe.Pointer(&b[0])), len(b))
return r, nil
case TypeChar: // Char and Bin are different because they're offset differently.
r := make([]byte, int(e.count))
if _, err := sr.ReadAt(r, 0); err != nil {
return nil, fmt.Errorf("rpm: header: error reading char: %w", err)
}
return r, nil
}
panic("unreachable")
default:
}
return nil, fmt.Errorf("unknown type: %v", e.Type)
}
// splitCString is a [bufio.SplitFunc] that splits at NUL, much like strings(1).
func splitCString(data []byte, atEOF bool) (advance int, token []byte, err error) {
if atEOF && len(data) == 0 {
return 0, nil, nil
}
if i := bytes.IndexByte(data, '\x00'); i >= 0 {
return i + 1, data[0:i], nil
}
if atEOF {
return len(data), data, nil
}
return 0, nil, nil
}
func (h *Header) loadArenas(ctx context.Context, r io.ReaderAt) error {
const (
headerSz = 8
tagsMax = 0x0000ffff
dataMax = 0x0fffffff
sizeMax = 256 * 1024 * 1024
)
b := make([]byte, headerSz)
if _, err := r.ReadAt(b, 0); err != nil {
return fmt.Errorf("header: failed to read: %w", err)
}
tagsCt := binary.BigEndian.Uint32(b[0:])
dataSz := binary.BigEndian.Uint32(b[4:])
if tagsCt > tagsMax {
return fmt.Errorf("header botch: number of tags (%d) out of range", tagsCt)
}
if dataSz > dataMax {
return fmt.Errorf("header botch: data length (%d) out of range", dataSz)
}
tagsSz := int64(tagsCt) * entryInfoSize
// Sanity check, if possible:
var inSz int64
switch v := r.(type) {
case interface{ Size() int64 }:
// Check for Size method. [ioSectionReader]s and [byte.Buffer]s have these.
inSz = v.Size()
case io.Seeker:
// Seek if present.
var err error
inSz, err = v.Seek(0, io.SeekEnd)
if err != nil {
return err
}
default:
// Do a read for the end of the segment.
end := preambleSize + tagsSz + int64(dataSz)
if _, err := r.ReadAt(b, end-int64(len(b))); err != nil {
return err
}
inSz = end
}
if sz := preambleSize + tagsSz + int64(dataSz); sz >= sizeMax || sz != inSz {
return fmt.Errorf("not enough data")
}
if tagsCt == 0 {
return fmt.Errorf("no tags")
}
h.tags = io.NewSectionReader(r, headerSz, tagsSz)
h.data = io.NewSectionReader(r, headerSz+tagsSz, int64(dataSz))
h.Infos = make([]EntryInfo, tagsCt)
return nil
}
// ErrNoRegion is a signal back from verifyRegion that the first tag is not one
// of the expected ones.
//
// This being reported means that the region verification has been
// short-circuited.
var errNoRegion = errors.New("no initial region tag, this is probably a bdb database")
func (h *Header) verifyRegion(ctx context.Context) error {
const regionTagCount = 16
region, err := h.loadTag(ctx, 0)
if err != nil {
return err
}
switch region.Tag {
case TagHeaderSignatures:
case TagHeaderImmutable:
case TagHeaderImage:
default:
return fmt.Errorf("region tag not found, got %v: %w", region.Tag, errNoRegion)
}
if region.Type != TypeBin || region.count != regionTagCount {
return fmt.Errorf("nonsense region tag: %v, count: %d", region.Type, region.count)
}
if off := region.offset + regionTagCount; off < 0 || off > int32(h.data.Size()) {
return fmt.Errorf("nonsense region offset")
}
var trailer EntryInfo
b := make([]byte, entryInfoSize)
if _, err := h.data.ReadAt(b, int64(region.offset)); err != nil {
return err
}
if err := trailer.UnmarshalBinary(b); err != nil {
return err
}
rDataLen := region.offset + regionTagCount
trailer.offset = -trailer.offset // trailer offset is negative and special
rIdxLen := trailer.offset / entryInfoSize
// Fixup copied out of librpm:
if region.Tag == TagHeaderSignatures && trailer.Tag == TagHeaderImage {
trailer.Tag = TagHeaderSignatures
}
if trailer.Tag != region.Tag || trailer.Type != TypeRegionTag || trailer.count != regionTagCount {
return fmt.Errorf("bad region trailer: %v", trailer)
}
if (trailer.offset%entryInfoSize != 0) ||
int64(rIdxLen) > h.tags.Size() ||
int64(rDataLen) > h.data.Size() {
return fmt.Errorf("region %d size incorrect: ril %d il %d rdl %d dl %d",
region.Tag, rIdxLen, h.tags.Size(), rDataLen, h.data.Size())
}
h.region = region.Tag
return nil
}
// verifyInfo verifies the "info" segments in the header.
//
// Experimentally, bdb databases aren't always sorted the expected way. The
// passed boolean controls whether this method uses lax verification or not.
func (h *Header) verifyInfo(ctx context.Context, isBDB bool) error {
lim := len(h.Infos)
typecheck := h.region == TagHeaderImmutable || h.region == TagHeaderImage
var prev int32
start := 1
if isBDB {
start--
}
for i := start; i < lim; i++ {
e, err := h.loadTag(ctx, i)
if err != nil {
return err
}
switch {
case prev > e.offset:
return fmt.Errorf("botched entry: prev > offset (%d > %d)", prev, e.offset)
case e.Tag < TagHeaderI18nTable && !isBDB:
return fmt.Errorf("botched entry: bad tag %v (%[1]d < %d)", e.Tag, TagHeaderI18nTable)
case e.Type < TypeMin || e.Type > TypeMax:
return fmt.Errorf("botched entry: bad type %v", e.Type)
case e.count == 0 || int64(e.count) > h.data.Size():
return fmt.Errorf("botched entry: bad count %d", e.count)
case (e.Type.alignment()-1)&e.offset != 0:
return fmt.Errorf("botched entry: weird alignment: type alignment %d, offset %d", e.Type.alignment(), e.offset)
case e.offset < 0 || int64(e.offset) > h.data.Size():
return fmt.Errorf("botched entry: bad offset %d", e.offset)
case typecheck && !checkTagType(e.Tag, e.Type):
return fmt.Errorf("botched entry: typecheck fail: %v is not %v", e.Tag, e.Type)
}
}
return nil
}
func checkTagType(key Tag, typ Kind) bool | {
if i, ok := tagByValue[key]; ok {
t := tagTable[i].Type
// Check the type. Some versions of string are typed incorrectly in a
// compatible way.
return t == typ || t.class() == typ.class()
}
// Unknown tags get a pass.
return true
} | identifier_body |
|
header.go | .Errorf("rpm: failed to parse header: %w", err)
}
return nil
}
// ReadData returns a copy of the data indicated by the passed EntryInfo.
//
// If an error is not reported, the returned interface{} is the type indicated by the
// EntryInfo's "Type" member.
//
// NB The TypeChar, TypeInt8, TypeInt16, TypeInt32, TypeInt64, and TypeI18nString
// all return slices.
func (h *Header) ReadData(ctx context.Context, e *EntryInfo) (interface{}, error) {
// TODO(hank) Provide a generic function like `func[T any](*Header, *EntryInfo) T` to do this.
switch e.Type {
case TypeBin:
if /* is region */ false {
return nil, errors.New("todo: handle region tags")
}
b := make([]byte, e.count)
if _, err := h.data.ReadAt(b, int64(e.offset)); err != nil {
return nil, fmt.Errorf("rpm: header: error reading binary: %w", err)
}
return b, nil
case TypeI18nString, TypeStringArray:
sc := bufio.NewScanner(io.NewSectionReader(h.data, int64(e.offset), -1))
sc.Split(splitCString)
s := make([]string, int(e.count))
for i, lim := 0, int(e.count); i < lim && sc.Scan(); i++ {
s[i] = sc.Text()
}
if err := sc.Err(); err != nil {
return nil, fmt.Errorf("rpm: header: error reading string array: %w", err)
}
return s, nil
case TypeString:
// C-terminated string.
r := bufio.NewReader(io.NewSectionReader(h.data, int64(e.offset), -1))
s, err := r.ReadString(0x00)
if err != nil {
return nil, fmt.Errorf("rpm: header: error reading string: %w", err)
}
// ReadString includes the delimiter, be sure to remove it.
return s[:len(s)-1], nil
case TypeChar, TypeInt8, TypeInt16, TypeInt32, TypeInt64:
sr := io.NewSectionReader(h.data, int64(e.offset), -1)
switch e.Type {
case TypeInt64:
r := make([]uint64, int(e.count))
b := make([]byte, 8)
for i := range r {
if _, err := io.ReadFull(sr, b); err != nil {
return nil, fmt.Errorf("rpm: header: error reading %T: %w", r[0], err)
}
r[i] = binary.BigEndian.Uint64(b)
}
return r, nil
case TypeInt32:
r := make([]int32, int(e.count))
b := make([]byte, 4)
for i := range r {
if _, err := io.ReadFull(sr, b); err != nil {
return nil, fmt.Errorf("rpm: header: error reading %T: %w", r[0], err)
}
r[i] = int32(binary.BigEndian.Uint32(b))
}
return r, nil
case TypeInt16:
r := make([]int16, int(e.count))
b := make([]byte, 2)
for i := range r {
if _, err := io.ReadFull(sr, b); err != nil {
return nil, fmt.Errorf("rpm: header: error reading %T: %w", r[0], err)
}
r[i] = int16(binary.BigEndian.Uint16(b))
}
return r, nil
case TypeInt8:
b := make([]byte, int(e.count))
if _, err := io.ReadFull(sr, b); err != nil {
return nil, fmt.Errorf("rpm: header: error reading int8: %w", err)
}
// Despite byte == uint8 and uint8 being convertible to int8, this is
// the only way I can figure out to avoid an extra copy or using a
// ByteReader, which would just have an internal buffer and be slower.
r := unsafe.Slice((*int8)(unsafe.Pointer(&b[0])), len(b))
return r, nil
case TypeChar: // Char and Bin are different because they're offset differently.
r := make([]byte, int(e.count))
if _, err := sr.ReadAt(r, 0); err != nil {
return nil, fmt.Errorf("rpm: header: error reading char: %w", err)
}
return r, nil
}
panic("unreachable")
default:
}
return nil, fmt.Errorf("unknown type: %v", e.Type)
}
// splitCString is a [bufio.SplitFunc] that splits at NUL, much like strings(1).
func splitCString(data []byte, atEOF bool) (advance int, token []byte, err error) {
if atEOF && len(data) == 0 {
return 0, nil, nil
}
if i := bytes.IndexByte(data, '\x00'); i >= 0 {
return i + 1, data[0:i], nil
}
if atEOF {
return len(data), data, nil
}
return 0, nil, nil
}
func (h *Header) loadArenas(ctx context.Context, r io.ReaderAt) error {
const (
headerSz = 8
tagsMax = 0x0000ffff
dataMax = 0x0fffffff
sizeMax = 256 * 1024 * 1024
)
b := make([]byte, headerSz)
if _, err := r.ReadAt(b, 0); err != nil {
return fmt.Errorf("header: failed to read: %w", err)
}
tagsCt := binary.BigEndian.Uint32(b[0:])
dataSz := binary.BigEndian.Uint32(b[4:])
if tagsCt > tagsMax |
if dataSz > dataMax {
return fmt.Errorf("header botch: data length (%d) out of range", dataSz)
}
tagsSz := int64(tagsCt) * entryInfoSize
// Sanity check, if possible:
var inSz int64
switch v := r.(type) {
case interface{ Size() int64 }:
// Check for Size method. [ioSectionReader]s and [byte.Buffer]s have these.
inSz = v.Size()
case io.Seeker:
// Seek if present.
var err error
inSz, err = v.Seek(0, io.SeekEnd)
if err != nil {
return err
}
default:
// Do a read for the end of the segment.
end := preambleSize + tagsSz + int64(dataSz)
if _, err := r.ReadAt(b, end-int64(len(b))); err != nil {
return err
}
inSz = end
}
if sz := preambleSize + tagsSz + int64(dataSz); sz >= sizeMax || sz != inSz {
return fmt.Errorf("not enough data")
}
if tagsCt == 0 {
return fmt.Errorf("no tags")
}
h.tags = io.NewSectionReader(r, headerSz, tagsSz)
h.data = io.NewSectionReader(r, headerSz+tagsSz, int64(dataSz))
h.Infos = make([]EntryInfo, tagsCt)
return nil
}
// ErrNoRegion is a signal back from verifyRegion that the first tag is not one
// of the expected ones.
//
// This being reported means that the region verification has been
// short-circuited.
var errNoRegion = errors.New("no initial region tag, this is probably a bdb database")
func (h *Header) verifyRegion(ctx context.Context) error {
const regionTagCount = 16
region, err := h.loadTag(ctx, 0)
if err != nil {
return err
}
switch region.Tag {
case TagHeaderSignatures:
case TagHeaderImmutable:
case TagHeaderImage:
default:
return fmt.Errorf("region tag not found, got %v: %w", region.Tag, errNoRegion)
}
if region.Type != TypeBin || region.count != regionTagCount {
return fmt.Errorf("nonsense region tag: %v, count: %d", region.Type, region.count)
}
if off := region.offset + regionTagCount; off < 0 || off > int32(h.data.Size()) {
return fmt.Errorf("nonsense region offset")
}
var trailer EntryInfo
b := make([]byte, entryInfoSize)
if _, err := h.data.ReadAt(b, int64(region.offset)); err != nil {
return err
}
if err := trailer.UnmarshalBinary(b); err != nil {
return err
}
rDataLen := region.offset + regionTagCount
trailer.offset = -trailer.offset // trailer offset is negative and special
rIdxLen := trailer.offset / | {
return fmt.Errorf("header botch: number of tags (%d) out of range", tagsCt)
} | conditional_block |
header.go | ++ {
s[i] = sc.Text()
}
if err := sc.Err(); err != nil {
return nil, fmt.Errorf("rpm: header: error reading string array: %w", err)
}
return s, nil
case TypeString:
// C-terminated string.
r := bufio.NewReader(io.NewSectionReader(h.data, int64(e.offset), -1))
s, err := r.ReadString(0x00)
if err != nil {
return nil, fmt.Errorf("rpm: header: error reading string: %w", err)
}
// ReadString includes the delimiter, be sure to remove it.
return s[:len(s)-1], nil
case TypeChar, TypeInt8, TypeInt16, TypeInt32, TypeInt64:
sr := io.NewSectionReader(h.data, int64(e.offset), -1)
switch e.Type {
case TypeInt64:
r := make([]uint64, int(e.count))
b := make([]byte, 8)
for i := range r {
if _, err := io.ReadFull(sr, b); err != nil {
return nil, fmt.Errorf("rpm: header: error reading %T: %w", r[0], err)
}
r[i] = binary.BigEndian.Uint64(b)
}
return r, nil
case TypeInt32:
r := make([]int32, int(e.count))
b := make([]byte, 4)
for i := range r {
if _, err := io.ReadFull(sr, b); err != nil {
return nil, fmt.Errorf("rpm: header: error reading %T: %w", r[0], err)
}
r[i] = int32(binary.BigEndian.Uint32(b))
}
return r, nil
case TypeInt16:
r := make([]int16, int(e.count))
b := make([]byte, 2)
for i := range r {
if _, err := io.ReadFull(sr, b); err != nil {
return nil, fmt.Errorf("rpm: header: error reading %T: %w", r[0], err)
}
r[i] = int16(binary.BigEndian.Uint16(b))
}
return r, nil
case TypeInt8:
b := make([]byte, int(e.count))
if _, err := io.ReadFull(sr, b); err != nil {
return nil, fmt.Errorf("rpm: header: error reading int8: %w", err)
}
// Despite byte == uint8 and uint8 being convertible to int8, this is
// the only way I can figure out to avoid an extra copy or using a
// ByteReader, which would just have an internal buffer and be slower.
r := unsafe.Slice((*int8)(unsafe.Pointer(&b[0])), len(b))
return r, nil
case TypeChar: // Char and Bin are different because they're offset differently.
r := make([]byte, int(e.count))
if _, err := sr.ReadAt(r, 0); err != nil {
return nil, fmt.Errorf("rpm: header: error reading char: %w", err)
}
return r, nil
}
panic("unreachable")
default:
}
return nil, fmt.Errorf("unknown type: %v", e.Type)
}
// splitCString is a [bufio.SplitFunc] that splits at NUL, much like strings(1).
func splitCString(data []byte, atEOF bool) (advance int, token []byte, err error) {
if atEOF && len(data) == 0 {
return 0, nil, nil
}
if i := bytes.IndexByte(data, '\x00'); i >= 0 {
return i + 1, data[0:i], nil
}
if atEOF {
return len(data), data, nil
}
return 0, nil, nil
}
func (h *Header) loadArenas(ctx context.Context, r io.ReaderAt) error {
const (
headerSz = 8
tagsMax = 0x0000ffff
dataMax = 0x0fffffff
sizeMax = 256 * 1024 * 1024
)
b := make([]byte, headerSz)
if _, err := r.ReadAt(b, 0); err != nil {
return fmt.Errorf("header: failed to read: %w", err)
}
tagsCt := binary.BigEndian.Uint32(b[0:])
dataSz := binary.BigEndian.Uint32(b[4:])
if tagsCt > tagsMax {
return fmt.Errorf("header botch: number of tags (%d) out of range", tagsCt)
}
if dataSz > dataMax {
return fmt.Errorf("header botch: data length (%d) out of range", dataSz)
}
tagsSz := int64(tagsCt) * entryInfoSize
// Sanity check, if possible:
var inSz int64
switch v := r.(type) {
case interface{ Size() int64 }:
// Check for Size method. [ioSectionReader]s and [byte.Buffer]s have these.
inSz = v.Size()
case io.Seeker:
// Seek if present.
var err error
inSz, err = v.Seek(0, io.SeekEnd)
if err != nil {
return err
}
default:
// Do a read for the end of the segment.
end := preambleSize + tagsSz + int64(dataSz)
if _, err := r.ReadAt(b, end-int64(len(b))); err != nil {
return err
}
inSz = end
}
if sz := preambleSize + tagsSz + int64(dataSz); sz >= sizeMax || sz != inSz {
return fmt.Errorf("not enough data")
}
if tagsCt == 0 {
return fmt.Errorf("no tags")
}
h.tags = io.NewSectionReader(r, headerSz, tagsSz)
h.data = io.NewSectionReader(r, headerSz+tagsSz, int64(dataSz))
h.Infos = make([]EntryInfo, tagsCt)
return nil
}
// ErrNoRegion is a signal back from verifyRegion that the first tag is not one
// of the expected ones.
//
// This being reported means that the region verification has been
// short-circuited.
var errNoRegion = errors.New("no initial region tag, this is probably a bdb database")
func (h *Header) verifyRegion(ctx context.Context) error {
const regionTagCount = 16
region, err := h.loadTag(ctx, 0)
if err != nil {
return err
}
switch region.Tag {
case TagHeaderSignatures:
case TagHeaderImmutable:
case TagHeaderImage:
default:
return fmt.Errorf("region tag not found, got %v: %w", region.Tag, errNoRegion)
}
if region.Type != TypeBin || region.count != regionTagCount {
return fmt.Errorf("nonsense region tag: %v, count: %d", region.Type, region.count)
}
if off := region.offset + regionTagCount; off < 0 || off > int32(h.data.Size()) {
return fmt.Errorf("nonsense region offset")
}
var trailer EntryInfo
b := make([]byte, entryInfoSize)
if _, err := h.data.ReadAt(b, int64(region.offset)); err != nil {
return err
}
if err := trailer.UnmarshalBinary(b); err != nil {
return err
}
rDataLen := region.offset + regionTagCount
trailer.offset = -trailer.offset // trailer offset is negative and special
rIdxLen := trailer.offset / entryInfoSize
// Fixup copied out of librpm:
if region.Tag == TagHeaderSignatures && trailer.Tag == TagHeaderImage {
trailer.Tag = TagHeaderSignatures
}
if trailer.Tag != region.Tag || trailer.Type != TypeRegionTag || trailer.count != regionTagCount {
return fmt.Errorf("bad region trailer: %v", trailer)
}
if (trailer.offset%entryInfoSize != 0) ||
int64(rIdxLen) > h.tags.Size() ||
int64(rDataLen) > h.data.Size() {
return fmt.Errorf("region %d size incorrect: ril %d il %d rdl %d dl %d",
region.Tag, rIdxLen, h.tags.Size(), rDataLen, h.data.Size())
}
h.region = region.Tag
return nil
}
// verifyInfo verifies the "info" segments in the header.
//
// Experimentally, bdb databases aren't always sorted the expected way. The
// passed boolean controls whether this method uses lax verification or not.
func (h *Header) verifyInfo(ctx context.Context, isBDB bool) error {
lim := len(h.Infos)
typecheck := h.region == TagHeaderImmutable || h.region == TagHeaderImage
var prev int32
start := 1
if isBDB {
start--
}
for i := start; i < lim; i++ { | e, err := h.loadTag(ctx, i)
if err != nil {
return err | random_line_split |
|
profiles.go | LOGSTASH_PROFILE"
OriginProfileType = "ORG_PROFILE"
// RiakProfileType is the type of a Profile used on the legacy RiakKV system
// which used to be used as a back-end for Traffic Vault.
//
// Deprecated: Support for Riak as a Traffic Vault back-end is being dropped
// in the near future. Profiles of type UnknownProfileType should be used on
// PostgreSQL database servers instead.
RiakProfileType = "RIAK_PROFILE"
SplunkProfileType = "SPLUNK_PROFILE"
TrafficMonitorProfileType = "TM_PROFILE"
TrafficPortalProfileType = "TP_PROFILE"
TrafficRouterProfileType = "TR_PROFILE"
TrafficStatsProfileType = "TS_PROFILE"
UnkownProfileType = "UNK_PROFILE"
)
// ProfilesResponse is a list of profiles returned by GET requests.
type ProfilesResponse struct {
Response []Profile `json:"response"`
Alerts
}
// ProfileResponse is a single Profile Response for Update and Create to depict what changed
// swagger:response ProfileResponse
// in: body
type ProfileResponse struct {
// in: body
Response Profile `json:"response"`
Alerts
}
// A Profile represents a set of configuration for a server or Delivery Service
// which may be reused to allow sharing configuration across the objects to
// which it is assigned.
type Profile struct {
ID int `json:"id" db:"id"`
LastUpdated TimeNoMod `json:"lastUpdated"`
Name string `json:"name"`
Parameter string `json:"param"`
Description string `json:"description"`
CDNName string `json:"cdnName"`
CDNID int `json:"cdn"`
RoutingDisabled bool `json:"routingDisabled"`
Type string `json:"type"`
Parameters []ParameterNullable `json:"params,omitempty"`
}
// ProfilesResponseV5 is a list of profiles returned by GET requests.
type ProfilesResponseV5 struct {
Response []ProfileV5 `json:"response"`
Alerts
}
// A ProfileV5 represents a set of configuration for a server or Delivery Service
// which may be reused to allow sharing configuration across the objects to
// which it is assigned. Note: Field LastUpdated represents RFC3339
type ProfileV5 struct {
ID int `json:"id" db:"id"`
LastUpdated time.Time `json:"lastUpdated" db:"last_updated"`
Name string `json:"name" db:"name"`
Description string `json:"description" db:"description"`
CDNName string `json:"cdnName" db:"cdn_name"`
CDNID int `json:"cdn" db:"cdn"`
RoutingDisabled bool `json:"routingDisabled" db:"routing_disabled"`
Type string `json:"type" db:"type"`
Parameters []ParameterNullable `json:"params,omitempty"`
}
// ProfileNullable is exactly the same as Profile except that its fields are
// reference values, so they may be nil.
type ProfileNullable struct {
ID *int `json:"id" db:"id"`
LastUpdated *TimeNoMod `json:"lastUpdated" db:"last_updated"`
Name *string `json:"name" db:"name"`
Description *string `json:"description" db:"description"`
CDNName *string `json:"cdnName" db:"cdn_name"`
CDNID *int `json:"cdn" db:"cdn"`
RoutingDisabled *bool `json:"routingDisabled" db:"routing_disabled"`
Type *string `json:"type" db:"type"`
Parameters []ParameterNullable `json:"params,omitempty"`
}
// ProfileCopy contains details about the profile created from an existing profile.
type ProfileCopy struct {
ID int `json:"id"`
Name string `json:"name"`
ExistingID int `json:"idCopyFrom"`
ExistingName string `json:"profileCopyFrom"`
Description string `json:"description"`
}
// ProfileCopyResponse represents the Traffic Ops API's response when a Profile
// is copied.
type ProfileCopyResponse struct {
Response ProfileCopy `json:"response"`
Alerts
}
// ProfileExportImportNullable is an object of the form used by Traffic Ops
// to represent exported and imported profiles.
type ProfileExportImportNullable struct {
Name *string `json:"name"`
Description *string `json:"description"`
CDNName *string `json:"cdn"`
Type *string `json:"type"`
}
// ProfileExportResponse is an object of the form used by Traffic Ops
// to represent exported profile response.
type ProfileExportResponse struct {
// Parameters associated to the profile
//
Profile ProfileExportImportNullable `json:"profile"`
// Parameters associated to the profile
//
Parameters []ProfileExportImportParameterNullable `json:"parameters"`
Alerts
}
// ProfileImportRequest is an object of the form used by Traffic Ops
// to represent a request to import a profile.
type ProfileImportRequest struct {
// Parameters associated to the profile
//
Profile ProfileExportImportNullable `json:"profile"`
// Parameters associated to the profile
//
Parameters []ProfileExportImportParameterNullable `json:"parameters"`
}
// ProfileImportResponse is an object of the form used by Traffic Ops
// to represent a response from importing a profile.
type ProfileImportResponse struct {
Response ProfileImportResponseObj `json:"response"`
Alerts
}
// ProfileImportResponseObj contains data about the profile being imported.
type ProfileImportResponseObj struct {
ProfileExportImportNullable
ID *int `json:"id"`
}
// Validate validates an profile import request, implementing the
// github.com/apache/trafficcontrol/traffic_ops/traffic_ops_golang/api.ParseValidator
// interface.
func (profileImport *ProfileImportRequest) Validate(tx *sql.Tx) error {
profile := profileImport.Profile
// Profile fields are valid
errs := tovalidate.ToErrors(validation.Errors{
"name": validation.Validate(profile.Name, validation.By(
func(value interface{}) error {
name, ok := value.(*string)
if !ok {
return fmt.Errorf("wrong type, need: string, got: %T", value)
}
if name == nil || *name == "" {
return errors.New("required and cannot be blank")
}
if strings.Contains(*name, " ") {
return errors.New("cannot contain spaces")
}
return nil
},
)),
"description": validation.Validate(profile.Description, validation.Required),
"cdnName": validation.Validate(profile.CDNName, validation.Required),
"type": validation.Validate(profile.Type, validation.Required),
})
// Validate that the CDN exists
if profile.CDNName != nil {
if ok, err := CDNExistsByName(*profile.CDNName, tx); err != nil {
errString := fmt.Sprintf("checking cdn name %v existence", *profile.CDNName)
log.Errorf("%v: %v", errString, err.Error())
errs = append(errs, errors.New(errString))
} else if !ok {
errs = append(errs, fmt.Errorf("%v CDN does not exist", *profile.CDNName))
}
}
// Validate profile does not already exist
if profile.Name != nil {
if ok, err := ProfileExistsByName(*profile.Name, tx); err != nil {
errString := fmt.Sprintf("checking profile name %v existence", *profile.Name)
log.Errorf("%v: %v", errString, err.Error())
errs = append(errs, errors.New(errString))
} else if ok {
errs = append(errs, fmt.Errorf("a profile with the name \"%s\" already exists", *profile.Name))
}
}
// Validate all parameters
// export/import does not include secure flag
// default value to not flag on validation
secure := 1
for i, pp := range profileImport.Parameters {
if ppErrs := validateProfileParamPostFields(pp.ConfigFile, pp.Name, pp.Value, &secure); len(ppErrs) > 0 {
for _, err := range ppErrs {
errs = append(errs, errors.New("parameter "+strconv.Itoa(i)+": "+err.Error()))
}
}
}
if len(errs) > 0 {
return util.JoinErrs(errs)
}
return nil
}
// ProfilesExistByIDs returns whether profiles exist for all the given ids, and any error.
// TODO move to helper package.
func ProfilesExistByIDs(ids []int64, tx *sql.Tx) (bool, error) {
count := 0
if err := tx.QueryRow(`SELECT count(*) from profile where id = ANY($1)`, pq.Array(ids)).Scan(&count); err != nil {
return false, errors.New("querying profiles existence from id: " + err.Error())
}
return count == len(ids), nil
}
// ProfileExistsByID returns whether a profile with the given id exists, and any error.
// TODO move to helper package.
func ProfileExistsByID(id int64, tx *sql.Tx) (bool, error) | {
count := 0
if err := tx.QueryRow(`SELECT count(*) from profile where id = $1`, id).Scan(&count); err != nil {
return false, errors.New("querying profile existence from id: " + err.Error())
}
return count > 0, nil
} | identifier_body |
|
profiles.go | , WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import (
"database/sql"
"errors"
"fmt"
"strconv"
"strings"
"time"
"github.com/apache/trafficcontrol/lib/go-log"
"github.com/apache/trafficcontrol/lib/go-tc/tovalidate"
"github.com/apache/trafficcontrol/lib/go-util"
validation "github.com/go-ozzo/ozzo-validation"
"github.com/lib/pq"
)
// These are the valid values for the Type property of a Profile. No other
// values will be accepted, and these are not configurable.
const (
CacheServerProfileType = "ATS_PROFILE"
DeliveryServiceProfileType = "DS_PROFILE"
ElasticSearchProfileType = "ES_PROFILE"
GroveProfileType = "GROVE_PROFILE"
InfluxdbProfileType = "INFLUXDB_PROFILE"
KafkaProfileType = "KAFKA_PROFILE"
LogstashProfileType = "LOGSTASH_PROFILE"
OriginProfileType = "ORG_PROFILE"
// RiakProfileType is the type of a Profile used on the legacy RiakKV system
// which used to be used as a back-end for Traffic Vault.
//
// Deprecated: Support for Riak as a Traffic Vault back-end is being dropped
// in the near future. Profiles of type UnknownProfileType should be used on
// PostgreSQL database servers instead.
RiakProfileType = "RIAK_PROFILE"
SplunkProfileType = "SPLUNK_PROFILE"
TrafficMonitorProfileType = "TM_PROFILE"
TrafficPortalProfileType = "TP_PROFILE"
TrafficRouterProfileType = "TR_PROFILE"
TrafficStatsProfileType = "TS_PROFILE"
UnkownProfileType = "UNK_PROFILE"
)
// ProfilesResponse is a list of profiles returned by GET requests.
type ProfilesResponse struct {
Response []Profile `json:"response"`
Alerts
}
// ProfileResponse is a single Profile Response for Update and Create to depict what changed
// swagger:response ProfileResponse
// in: body
type ProfileResponse struct {
// in: body
Response Profile `json:"response"`
Alerts
}
// A Profile represents a set of configuration for a server or Delivery Service
// which may be reused to allow sharing configuration across the objects to
// which it is assigned.
type Profile struct {
ID int `json:"id" db:"id"`
LastUpdated TimeNoMod `json:"lastUpdated"`
Name string `json:"name"`
Parameter string `json:"param"`
Description string `json:"description"`
CDNName string `json:"cdnName"`
CDNID int `json:"cdn"`
RoutingDisabled bool `json:"routingDisabled"`
Type string `json:"type"`
Parameters []ParameterNullable `json:"params,omitempty"`
}
// ProfilesResponseV5 is a list of profiles returned by GET requests.
type ProfilesResponseV5 struct {
Response []ProfileV5 `json:"response"`
Alerts
}
// A ProfileV5 represents a set of configuration for a server or Delivery Service
// which may be reused to allow sharing configuration across the objects to
// which it is assigned. Note: the LastUpdated field is an RFC3339 timestamp.
type ProfileV5 struct {
ID int `json:"id" db:"id"`
LastUpdated time.Time `json:"lastUpdated" db:"last_updated"`
Name string `json:"name" db:"name"`
Description string `json:"description" db:"description"`
CDNName string `json:"cdnName" db:"cdn_name"`
CDNID int `json:"cdn" db:"cdn"`
RoutingDisabled bool `json:"routingDisabled" db:"routing_disabled"`
Type string `json:"type" db:"type"`
Parameters []ParameterNullable `json:"params,omitempty"`
}
// ProfileNullable is exactly the same as Profile except that its fields are
// reference values, so they may be nil.
type ProfileNullable struct {
ID *int `json:"id" db:"id"`
LastUpdated *TimeNoMod `json:"lastUpdated" db:"last_updated"`
Name *string `json:"name" db:"name"`
Description *string `json:"description" db:"description"`
CDNName *string `json:"cdnName" db:"cdn_name"`
CDNID *int `json:"cdn" db:"cdn"`
RoutingDisabled *bool `json:"routingDisabled" db:"routing_disabled"`
Type *string `json:"type" db:"type"`
Parameters []ParameterNullable `json:"params,omitempty"`
}
// ProfileCopy contains details about the profile created from an existing profile.
type ProfileCopy struct {
ID int `json:"id"`
Name string `json:"name"`
ExistingID int `json:"idCopyFrom"`
ExistingName string `json:"profileCopyFrom"`
Description string `json:"description"`
}
// ProfileCopyResponse represents the Traffic Ops API's response when a Profile
// is copied.
type ProfileCopyResponse struct {
Response ProfileCopy `json:"response"`
Alerts
}
// ProfileExportImportNullable is an object of the form used by Traffic Ops
// to represent exported and imported profiles.
type ProfileExportImportNullable struct {
Name *string `json:"name"`
Description *string `json:"description"`
CDNName *string `json:"cdn"`
Type *string `json:"type"`
}
// ProfileExportResponse is an object of the form used by Traffic Ops
// to represent an exported profile response.
type ProfileExportResponse struct {
// The exported profile
//
Profile ProfileExportImportNullable `json:"profile"`
// Parameters associated with the profile
//
Parameters []ProfileExportImportParameterNullable `json:"parameters"`
Alerts
}
// ProfileImportRequest is an object of the form used by Traffic Ops
// to represent a request to import a profile.
type ProfileImportRequest struct {
// The profile to be imported
//
Profile ProfileExportImportNullable `json:"profile"`
// Parameters associated with the profile
//
Parameters []ProfileExportImportParameterNullable `json:"parameters"`
}
// ProfileImportResponse is an object of the form used by Traffic Ops
// to represent a response from importing a profile.
type ProfileImportResponse struct {
Response ProfileImportResponseObj `json:"response"`
Alerts
}
// ProfileImportResponseObj contains data about the profile being imported.
type ProfileImportResponseObj struct {
ProfileExportImportNullable
ID *int `json:"id"`
}
// Validate validates a profile import request, implementing the
// github.com/apache/trafficcontrol/traffic_ops/traffic_ops_golang/api.ParseValidator
// interface.
func (profileImport *ProfileImportRequest) Validate(tx *sql.Tx) error {
profile := profileImport.Profile
// Profile fields are valid
errs := tovalidate.ToErrors(validation.Errors{
"name": validation.Validate(profile.Name, validation.By(
func(value interface{}) error {
name, ok := value.(*string)
if !ok {
return fmt.Errorf("wrong type, need: string, got: %T", value)
}
if name == nil || *name == "" {
return errors.New("required and cannot be blank")
}
if strings.Contains(*name, " ") {
return errors.New("cannot contain spaces")
}
return nil
},
)),
"description": validation.Validate(profile.Description, validation.Required),
"cdnName": validation.Validate(profile.CDNName, validation.Required),
"type": validation.Validate(profile.Type, validation.Required),
})
// Validate that the CDN exists
if profile.CDNName != nil |
// Validate profile does not already exist
if profile.Name != nil {
if ok, err := ProfileExistsByName(*profile.Name, tx); err != nil {
errString := fmt.Sprintf("checking profile name %v existence", *profile.Name)
log.Errorf("%v: %v", errString, err.Error())
errs = append(errs, errors.New(errString))
} else if ok {
errs = append(errs, fmt.Errorf("a profile with the name \"%s\" already exists", *profile.Name))
}
}
// Validate all parameters
// export/import does not include secure flag
// default value to not flag on validation
secure := 1
for i, pp := range profileImport.Parameters {
if ppErrs := validateProfileParamPostFields(pp.ConfigFile, pp.Name, pp.Value, &secure); len(ppErrs) > 0 {
for _, err := range ppErrs {
errs = append(errs, errors.New("parameter "+strconv.Itoa(i)+": "+err.Error()))
}
}
}
if len(errs) > 0 {
return util.JoinErrs(errs)
}
return | {
if ok, err := CDNExistsByName(*profile.CDNName, tx); err != nil {
errString := fmt.Sprintf("checking cdn name %v existence", *profile.CDNName)
log.Errorf("%v: %v", errString, err.Error())
errs = append(errs, errors.New(errString))
} else if !ok {
errs = append(errs, fmt.Errorf("%v CDN does not exist", *profile.CDNName))
}
} | conditional_block |
profiles.go | BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import (
"database/sql"
"errors"
"fmt"
"strconv"
"strings"
"time"
"github.com/apache/trafficcontrol/lib/go-log"
"github.com/apache/trafficcontrol/lib/go-tc/tovalidate"
"github.com/apache/trafficcontrol/lib/go-util"
validation "github.com/go-ozzo/ozzo-validation"
"github.com/lib/pq"
)
// These are the valid values for the Type property of a Profile. No other
// values will be accepted, and these are not configurable.
const (
CacheServerProfileType = "ATS_PROFILE"
DeliveryServiceProfileType = "DS_PROFILE"
ElasticSearchProfileType = "ES_PROFILE"
GroveProfileType = "GROVE_PROFILE"
InfluxdbProfileType = "INFLUXDB_PROFILE"
KafkaProfileType = "KAFKA_PROFILE"
LogstashProfileType = "LOGSTASH_PROFILE"
OriginProfileType = "ORG_PROFILE"
// RiakProfileType is the type of a Profile used on the legacy RiakKV system
// which used to be used as a back-end for Traffic Vault.
//
// Deprecated: Support for Riak as a Traffic Vault back-end is being dropped
// in the near future. Profiles of type UnknownProfileType should be used on
// PostgreSQL database servers instead.
RiakProfileType = "RIAK_PROFILE"
SplunkProfileType = "SPLUNK_PROFILE"
TrafficMonitorProfileType = "TM_PROFILE"
TrafficPortalProfileType = "TP_PROFILE"
TrafficRouterProfileType = "TR_PROFILE"
TrafficStatsProfileType = "TS_PROFILE"
UnkownProfileType = "UNK_PROFILE"
)
// ProfilesResponse is a list of profiles returned by GET requests.
type ProfilesResponse struct {
Response []Profile `json:"response"`
Alerts
}
// ProfileResponse is a single Profile Response for Update and Create to depict what changed
// swagger:response ProfileResponse
// in: body
type ProfileResponse struct {
// in: body
Response Profile `json:"response"`
Alerts
}
// A Profile represents a set of configuration for a server or Delivery Service
// which may be reused to allow sharing configuration across the objects to
// which it is assigned.
type Profile struct {
ID int `json:"id" db:"id"`
LastUpdated TimeNoMod `json:"lastUpdated"`
Name string `json:"name"`
Parameter string `json:"param"`
Description string `json:"description"`
CDNName string `json:"cdnName"`
CDNID int `json:"cdn"`
RoutingDisabled bool `json:"routingDisabled"`
Type string `json:"type"`
Parameters []ParameterNullable `json:"params,omitempty"`
}
// ProfilesResponseV5 is a list of profiles returned by GET requests.
type ProfilesResponseV5 struct {
Response []ProfileV5 `json:"response"`
Alerts
}
// A ProfileV5 represents a set of configuration for a server or Delivery Service
// which may be reused to allow sharing configuration across the objects to
// which it is assigned. Note: the LastUpdated field is an RFC3339 timestamp.
type ProfileV5 struct {
ID int `json:"id" db:"id"`
LastUpdated time.Time `json:"lastUpdated" db:"last_updated"`
Name string `json:"name" db:"name"`
Description string `json:"description" db:"description"`
CDNName string `json:"cdnName" db:"cdn_name"`
CDNID int `json:"cdn" db:"cdn"`
RoutingDisabled bool `json:"routingDisabled" db:"routing_disabled"`
Type string `json:"type" db:"type"`
Parameters []ParameterNullable `json:"params,omitempty"`
}
// ProfileNullable is exactly the same as Profile except that its fields are
// reference values, so they may be nil.
type ProfileNullable struct {
ID *int `json:"id" db:"id"`
LastUpdated *TimeNoMod `json:"lastUpdated" db:"last_updated"`
Name *string `json:"name" db:"name"`
Description *string `json:"description" db:"description"`
CDNName *string `json:"cdnName" db:"cdn_name"`
CDNID *int `json:"cdn" db:"cdn"`
RoutingDisabled *bool `json:"routingDisabled" db:"routing_disabled"`
Type *string `json:"type" db:"type"`
Parameters []ParameterNullable `json:"params,omitempty"`
}
// ProfileCopy contains details about the profile created from an existing profile.
type ProfileCopy struct { | }
// ProfileCopyResponse represents the Traffic Ops API's response when a Profile
// is copied.
type ProfileCopyResponse struct {
Response ProfileCopy `json:"response"`
Alerts
}
// ProfileExportImportNullable is an object of the form used by Traffic Ops
// to represent exported and imported profiles.
type ProfileExportImportNullable struct {
Name *string `json:"name"`
Description *string `json:"description"`
CDNName *string `json:"cdn"`
Type *string `json:"type"`
}
// ProfileExportResponse is an object of the form used by Traffic Ops
// to represent an exported profile response.
type ProfileExportResponse struct {
// The exported profile
//
Profile ProfileExportImportNullable `json:"profile"`
// Parameters associated with the profile
//
Parameters []ProfileExportImportParameterNullable `json:"parameters"`
Alerts
}
// ProfileImportRequest is an object of the form used by Traffic Ops
// to represent a request to import a profile.
type ProfileImportRequest struct {
// The profile to be imported
//
Profile ProfileExportImportNullable `json:"profile"`
// Parameters associated with the profile
//
Parameters []ProfileExportImportParameterNullable `json:"parameters"`
}
// ProfileImportResponse is an object of the form used by Traffic Ops
// to represent a response from importing a profile.
type ProfileImportResponse struct {
Response ProfileImportResponseObj `json:"response"`
Alerts
}
// ProfileImportResponseObj contains data about the profile being imported.
type ProfileImportResponseObj struct {
ProfileExportImportNullable
ID *int `json:"id"`
}
// Validate validates a profile import request, implementing the
// github.com/apache/trafficcontrol/traffic_ops/traffic_ops_golang/api.ParseValidator
// interface.
func (profileImport *ProfileImportRequest) Validate(tx *sql.Tx) error {
profile := profileImport.Profile
// Profile fields are valid
errs := tovalidate.ToErrors(validation.Errors{
"name": validation.Validate(profile.Name, validation.By(
func(value interface{}) error {
name, ok := value.(*string)
if !ok {
return fmt.Errorf("wrong type, need: string, got: %T", value)
}
if name == nil || *name == "" {
return errors.New("required and cannot be blank")
}
if strings.Contains(*name, " ") {
return errors.New("cannot contain spaces")
}
return nil
},
)),
"description": validation.Validate(profile.Description, validation.Required),
"cdnName": validation.Validate(profile.CDNName, validation.Required),
"type": validation.Validate(profile.Type, validation.Required),
})
// Validate that the CDN exists
if profile.CDNName != nil {
if ok, err := CDNExistsByName(*profile.CDNName, tx); err != nil {
errString := fmt.Sprintf("checking cdn name %v existence", *profile.CDNName)
log.Errorf("%v: %v", errString, err.Error())
errs = append(errs, errors.New(errString))
} else if !ok {
errs = append(errs, fmt.Errorf("%v CDN does not exist", *profile.CDNName))
}
}
// Validate profile does not already exist
if profile.Name != nil {
if ok, err := ProfileExistsByName(*profile.Name, tx); err != nil {
errString := fmt.Sprintf("checking profile name %v existence", *profile.Name)
log.Errorf("%v: %v", errString, err.Error())
errs = append(errs, errors.New(errString))
} else if ok {
errs = append(errs, fmt.Errorf("a profile with the name \"%s\" already exists", *profile.Name))
}
}
// Validate all parameters
// export/import does not include secure flag
// default value to not flag on validation
secure := 1
for i, pp := range profileImport.Parameters {
if ppErrs := validateProfileParamPostFields(pp.ConfigFile, pp.Name, pp.Value, &secure); len(ppErrs) > 0 {
for _, err := range ppErrs {
errs = append(errs, errors.New("parameter "+strconv.Itoa(i)+": "+err.Error()))
}
}
}
if len(errs) > 0 {
return util.JoinErrs(errs)
}
return nil | ID int `json:"id"`
Name string `json:"name"`
ExistingID int `json:"idCopyFrom"`
ExistingName string `json:"profileCopyFrom"`
Description string `json:"description"` | random_line_split |
profiles.go | the License.
*/
import (
"database/sql"
"errors"
"fmt"
"strconv"
"strings"
"time"
"github.com/apache/trafficcontrol/lib/go-log"
"github.com/apache/trafficcontrol/lib/go-tc/tovalidate"
"github.com/apache/trafficcontrol/lib/go-util"
validation "github.com/go-ozzo/ozzo-validation"
"github.com/lib/pq"
)
// These are the valid values for the Type property of a Profile. No other
// values will be accepted, and these are not configurable.
const (
CacheServerProfileType = "ATS_PROFILE"
DeliveryServiceProfileType = "DS_PROFILE"
ElasticSearchProfileType = "ES_PROFILE"
GroveProfileType = "GROVE_PROFILE"
InfluxdbProfileType = "INFLUXDB_PROFILE"
KafkaProfileType = "KAFKA_PROFILE"
LogstashProfileType = "LOGSTASH_PROFILE"
OriginProfileType = "ORG_PROFILE"
// RiakProfileType is the type of a Profile used on the legacy RiakKV system
// which used to be used as a back-end for Traffic Vault.
//
// Deprecated: Support for Riak as a Traffic Vault back-end is being dropped
// in the near future. Profiles of type UnknownProfileType should be used on
// PostgreSQL database servers instead.
RiakProfileType = "RIAK_PROFILE"
SplunkProfileType = "SPLUNK_PROFILE"
TrafficMonitorProfileType = "TM_PROFILE"
TrafficPortalProfileType = "TP_PROFILE"
TrafficRouterProfileType = "TR_PROFILE"
TrafficStatsProfileType = "TS_PROFILE"
UnkownProfileType = "UNK_PROFILE"
)
// ProfilesResponse is a list of profiles returned by GET requests.
type ProfilesResponse struct {
Response []Profile `json:"response"`
Alerts
}
// ProfileResponse is a single Profile Response for Update and Create to depict what changed
// swagger:response ProfileResponse
// in: body
type ProfileResponse struct {
// in: body
Response Profile `json:"response"`
Alerts
}
// A Profile represents a set of configuration for a server or Delivery Service
// which may be reused to allow sharing configuration across the objects to
// which it is assigned.
type Profile struct {
ID int `json:"id" db:"id"`
LastUpdated TimeNoMod `json:"lastUpdated"`
Name string `json:"name"`
Parameter string `json:"param"`
Description string `json:"description"`
CDNName string `json:"cdnName"`
CDNID int `json:"cdn"`
RoutingDisabled bool `json:"routingDisabled"`
Type string `json:"type"`
Parameters []ParameterNullable `json:"params,omitempty"`
}
// ProfilesResponseV5 is a list of profiles returned by GET requests.
type ProfilesResponseV5 struct {
Response []ProfileV5 `json:"response"`
Alerts
}
// A ProfileV5 represents a set of configuration for a server or Delivery Service
// which may be reused to allow sharing configuration across the objects to
// which it is assigned. Note: the LastUpdated field is an RFC3339 timestamp.
type ProfileV5 struct {
ID int `json:"id" db:"id"`
LastUpdated time.Time `json:"lastUpdated" db:"last_updated"`
Name string `json:"name" db:"name"`
Description string `json:"description" db:"description"`
CDNName string `json:"cdnName" db:"cdn_name"`
CDNID int `json:"cdn" db:"cdn"`
RoutingDisabled bool `json:"routingDisabled" db:"routing_disabled"`
Type string `json:"type" db:"type"`
Parameters []ParameterNullable `json:"params,omitempty"`
}
// ProfileNullable is exactly the same as Profile except that its fields are
// reference values, so they may be nil.
type ProfileNullable struct {
ID *int `json:"id" db:"id"`
LastUpdated *TimeNoMod `json:"lastUpdated" db:"last_updated"`
Name *string `json:"name" db:"name"`
Description *string `json:"description" db:"description"`
CDNName *string `json:"cdnName" db:"cdn_name"`
CDNID *int `json:"cdn" db:"cdn"`
RoutingDisabled *bool `json:"routingDisabled" db:"routing_disabled"`
Type *string `json:"type" db:"type"`
Parameters []ParameterNullable `json:"params,omitempty"`
}
// ProfileCopy contains details about the profile created from an existing profile.
type ProfileCopy struct {
ID int `json:"id"`
Name string `json:"name"`
ExistingID int `json:"idCopyFrom"`
ExistingName string `json:"profileCopyFrom"`
Description string `json:"description"`
}
// ProfileCopyResponse represents the Traffic Ops API's response when a Profile
// is copied.
type ProfileCopyResponse struct {
Response ProfileCopy `json:"response"`
Alerts
}
// ProfileExportImportNullable is an object of the form used by Traffic Ops
// to represent exported and imported profiles.
type ProfileExportImportNullable struct {
Name *string `json:"name"`
Description *string `json:"description"`
CDNName *string `json:"cdn"`
Type *string `json:"type"`
}
// ProfileExportResponse is an object of the form used by Traffic Ops
// to represent an exported profile response.
type ProfileExportResponse struct {
// The exported profile
//
Profile ProfileExportImportNullable `json:"profile"`
// Parameters associated with the profile
//
Parameters []ProfileExportImportParameterNullable `json:"parameters"`
Alerts
}
// ProfileImportRequest is an object of the form used by Traffic Ops
// to represent a request to import a profile.
type ProfileImportRequest struct {
// The profile to be imported
//
Profile ProfileExportImportNullable `json:"profile"`
// Parameters associated with the profile
//
Parameters []ProfileExportImportParameterNullable `json:"parameters"`
}
// ProfileImportResponse is an object of the form used by Traffic Ops
// to represent a response from importing a profile.
type ProfileImportResponse struct {
Response ProfileImportResponseObj `json:"response"`
Alerts
}
// ProfileImportResponseObj contains data about the profile being imported.
type ProfileImportResponseObj struct {
ProfileExportImportNullable
ID *int `json:"id"`
}
// Validate validates a profile import request, implementing the
// github.com/apache/trafficcontrol/traffic_ops/traffic_ops_golang/api.ParseValidator
// interface.
func (profileImport *ProfileImportRequest) Validate(tx *sql.Tx) error {
profile := profileImport.Profile
// Profile fields are valid
errs := tovalidate.ToErrors(validation.Errors{
"name": validation.Validate(profile.Name, validation.By(
func(value interface{}) error {
name, ok := value.(*string)
if !ok {
return fmt.Errorf("wrong type, need: string, got: %T", value)
}
if name == nil || *name == "" {
return errors.New("required and cannot be blank")
}
if strings.Contains(*name, " ") {
return errors.New("cannot contain spaces")
}
return nil
},
)),
"description": validation.Validate(profile.Description, validation.Required),
"cdnName": validation.Validate(profile.CDNName, validation.Required),
"type": validation.Validate(profile.Type, validation.Required),
})
// Validate that the CDN exists
if profile.CDNName != nil {
if ok, err := CDNExistsByName(*profile.CDNName, tx); err != nil {
errString := fmt.Sprintf("checking cdn name %v existence", *profile.CDNName)
log.Errorf("%v: %v", errString, err.Error())
errs = append(errs, errors.New(errString))
} else if !ok {
errs = append(errs, fmt.Errorf("%v CDN does not exist", *profile.CDNName))
}
}
// Validate profile does not already exist
if profile.Name != nil {
if ok, err := ProfileExistsByName(*profile.Name, tx); err != nil {
errString := fmt.Sprintf("checking profile name %v existence", *profile.Name)
log.Errorf("%v: %v", errString, err.Error())
errs = append(errs, errors.New(errString))
} else if ok {
errs = append(errs, fmt.Errorf("a profile with the name \"%s\" already exists", *profile.Name))
}
}
// Validate all parameters
// export/import does not include secure flag
// default value to not flag on validation
secure := 1
for i, pp := range profileImport.Parameters {
if ppErrs := validateProfileParamPostFields(pp.ConfigFile, pp.Name, pp.Value, &secure); len(ppErrs) > 0 {
for _, err := range ppErrs {
errs = append(errs, errors.New("parameter "+strconv.Itoa(i)+": "+err.Error()))
}
}
}
if len(errs) > 0 {
return util.JoinErrs(errs)
}
return nil
}
// ProfilesExistByIDs returns whether profiles exist for all the given ids, and any error.
// TODO move to helper package.
func | ProfilesExistByIDs | identifier_name |
|
lib.rs | //! 2. Ensure all subsequent reads to the memory following the zeroing operation
//! will always see zeroes.
//!
//! This crate guarantees #1 is true: LLVM's volatile semantics ensure it.
//!
//! The story around #2 is much more complicated. In brief, it should be true that
//! LLVM's current implementation does not attempt to perform optimizations which
//! would allow a subsequent (non-volatile) read to see the original value prior
//! to zeroization. However, this is not a guarantee, but rather an LLVM
//! implementation detail.
//!
//! For more background, we can look to the [core::ptr::write_volatile]
//! documentation:
//!
//! > Volatile operations are intended to act on I/O memory, and are guaranteed
//! > to not be elided or reordered by the compiler across other volatile
//! > operations.
//! >
//! > Memory accessed with `read_volatile` or `write_volatile` should not be
//! > accessed with non-volatile operations.
//!
//! Uhoh! This crate does not guarantee all reads to the memory it operates on
//! are volatile, and the documentation for [core::ptr::write_volatile]
//! explicitly warns against mixing volatile and non-volatile operations.
//! Perhaps we'd be better off with something like a `VolatileCell`
//! type which owns the associated data and ensures all reads and writes are
//! volatile so we don't have to worry about the semantics of mixing volatile and
//! non-volatile accesses.
//!
//! While that's a strategy worth pursuing (and something we may investigate
//! separately from this crate), it comes with some onerous API requirements:
//! it means any data that we might ever desire to zero is owned by a
//! `VolatileCell`. However, this does not make it possible for this crate
//! to act on references, which severely limits its applicability. In fact
//! a `VolatileCell` can only act on values, i.e. to read a value from it,
//! we'd need to make a copy of it, and that's literally the opposite of
//! what we want.
//!
//! It's worth asking what the precise semantics of mixing volatile and
//! non-volatile reads actually are, and whether a less obtrusive API which
//! can act entirely on mutable references is possible, safe, and provides the
//! desired behavior.
//!
//! Unfortunately, that's a tricky question, because
//! [Rust does not have a formally defined memory model][memory-model],
//! and the behavior of mixing volatile and non-volatile memory accesses is
//! therefore not rigorously specified and winds up being an LLVM
//! implementation detail. The semantics were discussed extensively in this
//! thread, specifically in the context of zeroing secrets from memory:
//!
//! <https://internals.rust-lang.org/t/volatile-and-sensitive-memory/3188/24>
//!
//! Some notable details from this thread:
//!
//! - Rust/LLVM's notion of "volatile" is centered around data *accesses*, not
//! the data itself. Specifically it maps to flags in LLVM IR which control
//! the behavior of the optimizer, and is therefore a bit different from the
//! typical C notion of "volatile".
//! - As mentioned earlier, LLVM does not presently contain optimizations which
//! would reorder a non-volatile read to occur before a volatile write.
//! However, there is nothing precluding such optimizations from being added.
//! LLVM presently appears to exhibit the desired behavior for both points
//! #1 and #2 above, but there is nothing preventing future versions of Rust
//! and/or LLVM from changing that.
//!
//! To help mitigate concerns about reordering potentially exposing secrets
//! after they have been zeroed, this crate leverages the [core::sync::atomic]
//! memory fence functions including [compiler_fence] and [fence] (which uses
//! the CPU's native fence instructions). These fences are leveraged with the
//! strictest ordering guarantees, [Ordering::SeqCst], which ensures no
//! accesses are reordered. Without a formally defined memory model we can't
//! guarantee these will be effective, but we hope they will cover most cases.
//!
//! Concretely the threat of leaking "zeroized" secrets (via reordering by
//! LLVM and/or the CPU via out-of-order or speculative execution) would
//! require a non-volatile access to be reordered ahead of the following:
//!
//! 1. before an [Ordering::SeqCst] compiler fence
//! 2. before an [Ordering::SeqCst] runtime fence
//! 3. before a volatile write
//!
//! This seems unlikely, but our usage of mixed non-volatile and volatile
//! accesses is technically undefined behavior, at least until guarantees
//! about this particular mixture of operations is formally defined in a
//! Rust memory model.
//!
//! Furthermore, given the recent history of microarchitectural attacks
//! (Spectre, Meltdown, etc), there is also potential for "zeroized" secrets
//! to be leaked through covert channels (e.g. memory fences have been used
//! as a covert channel), so we are wary to make guarantees unless they can
//! be made firmly in terms of both a formal Rust memory model and the
//! generated code for a particular CPU architecture.
//!
//! In conclusion, this crate guarantees the zeroize operation will not be
//! elided or "optimized away", makes a "best effort" to ensure that
//! memory accesses will not be reordered ahead of the "zeroize" operation,
//! but **cannot** yet guarantee that such reordering will not occur.
//!
//! ## Stack/Heap Zeroing Notes
//!
//! This crate can be used to zero values from either the stack or the heap.
//!
//! However, be aware that Rust's current memory semantics (e.g. `Copy` types)
//! can leave copies of data in memory, and there isn't presently a good solution
//! for ensuring all copies of data on the stack are properly cleared.
//!
//! The [`Pin` RFC][pin] proposes a method for avoiding this.
//!
//! ## What about: clearing registers, mlock, mprotect, etc?
//!
//! This crate is laser-focused on being a simple, unobtrusive crate for zeroing
//! memory in as reliable a manner as is possible on stable Rust.
//!
//! Clearing registers is a difficult problem that can't easily be solved by
//! something like a crate, and requires either inline ASM or rustc support.
//! See <https://github.com/rust-lang/rust/issues/17046> for background on
//! this particular problem.
//!
//! Other memory protection mechanisms are interesting and useful, but often
//! overkill (e.g. defending against RAM scraping or attackers with swap access).
//! In as much as there may be merit to these approaches, there are also many
//! other crates that already implement more sophisticated memory protections.
//! Such protections are explicitly out-of-scope for this crate.
//!
//! Zeroing memory is [good cryptographic hygiene] and this crate seeks to promote
//! it in the most unobtrusive manner possible. This includes omitting complex
//! `unsafe` memory protection systems and just trying to make the best memory
//! zeroing crate available.
//!
//! [Zeroize]: https://docs.rs/zeroize/latest/zeroize/trait.Zeroize.html
//! [Zeroing memory securely is hard]: http://www.daemonology.net/blog/2014-09-04-how-to-zero-a-buffer.html
//! [Vec::clear()]: https://doc.rust-lang.org/std/vec/struct.Vec.html#method.clear
//! [String::clear()]: https://doc.rust-lang.org/std/string/struct.String.html#method.clear
//! [DefaultIsZeroes]: https://docs.rs/zeroize/latest/zeroize/trait.DefaultIsZeroes.html
//! [Default]: https://doc.rust-lang.org/std/default/trait.Default.html
//! [core::ptr::write_volatile]: https://doc.rust-lang.org/core/ptr/fn.write_volatile.html
//! [core::sync::atomic]: https://doc.rust-lang.org/stable/core/sync/atomic/index.html
//! [Ordering::SeqCst]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html#variant.SeqCst
//! [compiler_fence]: https://doc.rust-lang.org/stable/core/sync/atomic/fn.compiler_fence.html
//! [fence]: https://doc.rust-lang.org/stable/core/sync/atomic/fn.fence.html
//! [memory-model]: https://github.com/nikomatsakis/rust-memory-model
//! [pin]: https://github.com/rust-lang/rfcs/blob/master/text/2349-pin.md
//! [good cryptographic hygiene]: https://cryptocoding.net/index.php/Coding_rules#Clean_memory_of_secret_data
#![no_std]
#![deny(warnings, missing_docs, unused_import_braces, unused_qualifications)]
#![cfg_attr(all(feature = "nightly", not(feature = "std")), feature(alloc))]
#![cfg_attr(feature = "nightly", feature(core_intrinsics))]
#![doc(html_root_url = "https://docs.rs/zeroize/0.6.0")]
#[cfg(any(feature = "std", test))]
#[cfg_attr(test, macro_use)]
extern crate std;
#[cfg(feature = "zeroize_derive")]
#[allow(unused_imports)]
#[macro_use]
extern crate zeroize_der | //! ## What guarantees does this crate provide?
//!
//! Ideally a secure memory-zeroing function would guarantee the following:
//!
//! 1. Ensure the zeroing operation can't be "optimized away" by the compiler. | random_line_split |
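The crate documentation and trait impls above describe the public zeroize API; the sketch below is a minimal, illustrative use of that API as shown in this file (zeroize ~0.6), with placeholder secret values. It is not part of the original source.

use zeroize::Zeroize;

fn wipe_examples() {
    // Vec<u8>: the full capacity is overwritten with zeroes, then the Vec is cleared.
    let mut key = vec![0x42u8; 32];
    key.zeroize();
    assert!(key.is_empty());

    // String: the underlying bytes are wiped before the String is cleared.
    let mut passphrase = String::from("hunter2");
    passphrase.zeroize();
    assert!(passphrase.is_empty());

    // Primitive integers get Zeroize through the DefaultIsZeroes blanket impl.
    let mut counter: u64 = 0xdead_beef;
    counter.zeroize();
    assert_eq!(counter, 0);
}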
|
lib.rs | effort" to ensure that
//! memory accesses will not be reordered ahead of the "zeroize" operation,
//! but **cannot** yet guarantee that such reordering will not occur.
//!
//! ## Stack/Heap Zeroing Notes
//!
//! This crate can be used to zero values from either the stack or the heap.
//!
//! However, be aware that Rust's current memory semantics (e.g. `Copy` types)
//! can leave copies of data in memory, and there isn't presently a good solution
//! for ensuring all copies of data on the stack are properly cleared.
//!
//! The [`Pin` RFC][pin] proposes a method for avoiding this.
//!
//! ## What about: clearing registers, mlock, mprotect, etc?
//!
//! This crate is laser-focused on being a simple, unobtrusive crate for zeroing
//! memory in as reliable a manner as is possible on stable Rust.
//!
//! Clearing registers is a difficult problem that can't easily be solved by
//! something like a crate, and requires either inline ASM or rustc support.
//! See <https://github.com/rust-lang/rust/issues/17046> for background on
//! this particular problem.
//!
//! Other memory protection mechanisms are interesting and useful, but often
//! overkill (e.g. defending against RAM scraping or attackers with swap access).
//! In as much as there may be merit to these approaches, there are also many
//! other crates that already implement more sophisticated memory protections.
//! Such protections are explicitly out-of-scope for this crate.
//!
//! Zeroing memory is [good cryptographic hygiene] and this crate seeks to promote
//! it in the most unobtrusive manner possible. This includes omitting complex
//! `unsafe` memory protection systems and just trying to make the best memory
//! zeroing crate available.
//!
//! [Zeroize]: https://docs.rs/zeroize/latest/zeroize/trait.Zeroize.html
//! [Zeroing memory securely is hard]: http://www.daemonology.net/blog/2014-09-04-how-to-zero-a-buffer.html
//! [Vec::clear()]: https://doc.rust-lang.org/std/vec/struct.Vec.html#method.clear
//! [String::clear()]: https://doc.rust-lang.org/std/string/struct.String.html#method.clear
//! [DefaultIsZeroes]: https://docs.rs/zeroize/latest/zeroize/trait.DefaultIsZeroes.html
//! [Default]: https://doc.rust-lang.org/std/default/trait.Default.html
//! [core::ptr::write_volatile]: https://doc.rust-lang.org/core/ptr/fn.write_volatile.html
//! [core::sync::atomic]: https://doc.rust-lang.org/stable/core/sync/atomic/index.html
//! [Ordering::SeqCst]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html#variant.SeqCst
//! [compiler_fence]: https://doc.rust-lang.org/stable/core/sync/atomic/fn.compiler_fence.html
//! [fence]: https://doc.rust-lang.org/stable/core/sync/atomic/fn.fence.html
//! [memory-model]: https://github.com/nikomatsakis/rust-memory-model
//! [pin]: https://github.com/rust-lang/rfcs/blob/master/text/2349-pin.md
//! [good cryptographic hygiene]: https://cryptocoding.net/index.php/Coding_rules#Clean_memory_of_secret_data
#![no_std]
#![deny(warnings, missing_docs, unused_import_braces, unused_qualifications)]
#![cfg_attr(all(feature = "nightly", not(feature = "std")), feature(alloc))]
#![cfg_attr(feature = "nightly", feature(core_intrinsics))]
#![doc(html_root_url = "https://docs.rs/zeroize/0.6.0")]
#[cfg(any(feature = "std", test))]
#[cfg_attr(test, macro_use)]
extern crate std;
#[cfg(feature = "zeroize_derive")]
#[allow(unused_imports)]
#[macro_use]
extern crate zeroize_derive;
#[cfg(feature = "zeroize_derive")]
#[doc(hidden)]
pub use zeroize_derive::*;
use core::{ptr, slice::IterMut, sync::atomic};
#[cfg(all(feature = "alloc", not(feature = "std")))]
use alloc::prelude::*;
#[cfg(feature = "std")]
use std::prelude::v1::*;
/// Trait for securely erasing types from memory
pub trait Zeroize {
/// Zero out this object from memory (using Rust or OS intrinsics which
/// ensure the zeroization operation is not "optimized away")
fn zeroize(&mut self);
}
/// Marker trait for types whose `Default` is the desired zeroization result
pub trait DefaultIsZeroes: Copy + Default + Sized {}
/// Marker trait intended for use with `zeroize_derive` which indicates that
/// a type should have a drop handler which calls Zeroize.
///
/// Use `#[derive(ZeroizeOnDrop)]` to automatically impl this trait and an
/// associated drop handler.
pub trait ZeroizeOnDrop: Zeroize + Drop {}
impl<Z> Zeroize for Z
where
Z: DefaultIsZeroes,
{
fn zeroize(&mut self) {
volatile_set(self, Z::default());
atomic_fence();
}
}
macro_rules! impl_zeroize_with_default {
($($type:ty),+) => {
$(impl DefaultIsZeroes for $type {})+
};
}
impl_zeroize_with_default!(i8, i16, i32, i64, i128, isize);
impl_zeroize_with_default!(u16, u32, u64, u128, usize);
impl_zeroize_with_default!(f32, f64, char, bool);
/// On non-nightly targets, avoid special-casing u8
#[cfg(not(feature = "nightly"))]
impl_zeroize_with_default!(u8);
/// On nightly targets, don't implement `DefaultIsZeroes` so we can special
/// case using batch set operations.
#[cfg(feature = "nightly")]
impl Zeroize for u8 {
fn zeroize(&mut self) {
volatile_set(self, 0);
atomic_fence();
}
}
impl<'a, Z> Zeroize for IterMut<'a, Z>
where
Z: DefaultIsZeroes,
{
fn zeroize(&mut self) {
let default = Z::default();
for elem in self {
volatile_set(elem, default);
}
atomic_fence();
}
}
/// Implement zeroize on all types that can be zeroized with the zero value
impl<Z> Zeroize for [Z]
where
Z: DefaultIsZeroes,
{
fn zeroize(&mut self) {
// TODO: batch volatile set operation?
self.iter_mut().zeroize();
}
}
/// On `nightly` Rust, `volatile_set_memory` provides fast byte slice zeroing
#[cfg(feature = "nightly")]
impl Zeroize for [u8] {
fn zeroize(&mut self) {
volatile_zero_bytes(self);
atomic_fence();
}
}
#[cfg(feature = "alloc")]
impl<Z> Zeroize for Vec<Z>
where
Z: DefaultIsZeroes,
{
fn zeroize(&mut self) {
self.resize(self.capacity(), Default::default());
self.as_mut_slice().zeroize();
self.clear();
}
}
#[cfg(feature = "alloc")]
impl Zeroize for String {
fn zeroize(&mut self) {
unsafe { self.as_bytes_mut() }.zeroize();
debug_assert!(self.as_bytes().iter().all(|b| *b == 0));
self.clear();
}
}
/// On `nightly` Rust, `volatile_set_memory` provides fast byte array zeroing
#[cfg(feature = "nightly")]
macro_rules! impl_zeroize_for_byte_array {
($($size:expr),+) => {
$(
impl Zeroize for [u8; $size] {
fn zeroize(&mut self) {
volatile_zero_bytes(self.as_mut());
atomic_fence();
}
}
)+
};
}
#[cfg(feature = "nightly")]
impl_zeroize_for_byte_array!(
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
);
/// Use fences to prevent accesses from being reordered before this
/// point, which should hopefully help ensure that all accessors
/// see zeroes after this point.
#[inline]
fn | atomic_fence | identifier_name |
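As a sketch of how downstream code might hook a custom type into the traits defined above: the SecretKey type and its fields are assumptions made for illustration, and the manual Drop impl mirrors the pattern that the documented ZeroizeOnDrop derive support is meant to capture.

use zeroize::Zeroize;

// Hypothetical secret-holding type; not part of the zeroize crate.
struct SecretKey {
    bytes: [u8; 32],
    counter: u64,
}

impl Zeroize for SecretKey {
    fn zeroize(&mut self) {
        // Delegate to the impls the crate provides for byte slices and integers.
        self.bytes.zeroize();
        self.counter.zeroize();
    }
}

// Wiping on drop is the behavior ZeroizeOnDrop describes.
impl Drop for SecretKey {
    fn drop(&mut self) {
        self.zeroize();
    }
}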
|
lib.rs | will not occur.
//!
//! ## Stack/Heap Zeroing Notes
//!
//! This crate can be used to zero values from either the stack or the heap.
//!
//! However, be aware that Rust's current memory semantics (e.g. `Copy` types)
//! can leave copies of data in memory, and there isn't presently a good solution
//! for ensuring all copies of data on the stack are properly cleared.
//!
//! The [`Pin` RFC][pin] proposes a method for avoiding this.
//!
//! ## What about: clearing registers, mlock, mprotect, etc?
//!
//! This crate is laser-focused on being a simple, unobtrusive crate for zeroing
//! memory in as reliable a manner as is possible on stable Rust.
//!
//! Clearing registers is a difficult problem that can't easily be solved by
//! something like a crate, and requires either inline ASM or rustc support.
//! See <https://github.com/rust-lang/rust/issues/17046> for background on
//! this particular problem.
//!
//! Other memory protection mechanisms are interesting and useful, but often
//! overkill (e.g. defending against RAM scraping or attackers with swap access).
//! In as much as there may be merit to these approaches, there are also many
//! other crates that already implement more sophisticated memory protections.
//! Such protections are explicitly out-of-scope for this crate.
//!
//! Zeroing memory is [good cryptographic hygiene] and this crate seeks to promote
//! it in the most unobtrusive manner possible. This includes omitting complex
//! `unsafe` memory protection systems and just trying to make the best memory
//! zeroing crate available.
//!
//! [Zeroize]: https://docs.rs/zeroize/latest/zeroize/trait.Zeroize.html
//! [Zeroing memory securely is hard]: http://www.daemonology.net/blog/2014-09-04-how-to-zero-a-buffer.html
//! [Vec::clear()]: https://doc.rust-lang.org/std/vec/struct.Vec.html#method.clear
//! [String::clear()]: https://doc.rust-lang.org/std/string/struct.String.html#method.clear
//! [DefaultIsZeroes]: https://docs.rs/zeroize/latest/zeroize/trait.DefaultIsZeroes.html
//! [Default]: https://doc.rust-lang.org/std/default/trait.Default.html
//! [core::ptr::write_volatile]: https://doc.rust-lang.org/core/ptr/fn.write_volatile.html
//! [core::sync::atomic]: https://doc.rust-lang.org/stable/core/sync/atomic/index.html
//! [Ordering::SeqCst]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html#variant.SeqCst
//! [compiler_fence]: https://doc.rust-lang.org/stable/core/sync/atomic/fn.compiler_fence.html
//! [fence]: https://doc.rust-lang.org/stable/core/sync/atomic/fn.fence.html
//! [memory-model]: https://github.com/nikomatsakis/rust-memory-model
//! [pin]: https://github.com/rust-lang/rfcs/blob/master/text/2349-pin.md
//! [good cryptographic hygiene]: https://cryptocoding.net/index.php/Coding_rules#Clean_memory_of_secret_data
#![no_std]
#![deny(warnings, missing_docs, unused_import_braces, unused_qualifications)]
#![cfg_attr(all(feature = "nightly", not(feature = "std")), feature(alloc))]
#![cfg_attr(feature = "nightly", feature(core_intrinsics))]
#![doc(html_root_url = "https://docs.rs/zeroize/0.6.0")]
#[cfg(any(feature = "std", test))]
#[cfg_attr(test, macro_use)]
extern crate std;
#[cfg(feature = "zeroize_derive")]
#[allow(unused_imports)]
#[macro_use]
extern crate zeroize_derive;
#[cfg(feature = "zeroize_derive")]
#[doc(hidden)]
pub use zeroize_derive::*;
use core::{ptr, slice::IterMut, sync::atomic};
#[cfg(all(feature = "alloc", not(feature = "std")))]
use alloc::prelude::*;
#[cfg(feature = "std")]
use std::prelude::v1::*;
/// Trait for securely erasing types from memory
pub trait Zeroize {
/// Zero out this object from memory (using Rust or OS intrinsics which
/// ensure the zeroization operation is not "optimized away")
fn zeroize(&mut self);
}
/// Marker trait for types whose `Default` is the desired zeroization result
pub trait DefaultIsZeroes: Copy + Default + Sized {}
/// Marker trait intended for use with `zeroize_derive` which indicates that
/// a type should have a drop handler which calls Zeroize.
///
/// Use `#[derive(ZeroizeOnDrop)]` to automatically impl this trait and an
/// associated drop handler.
pub trait ZeroizeOnDrop: Zeroize + Drop {}
impl<Z> Zeroize for Z
where
Z: DefaultIsZeroes,
{
fn zeroize(&mut self) {
volatile_set(self, Z::default());
atomic_fence();
}
}
macro_rules! impl_zeroize_with_default {
($($type:ty),+) => {
$(impl DefaultIsZeroes for $type {})+
};
}
impl_zeroize_with_default!(i8, i16, i32, i64, i128, isize);
impl_zeroize_with_default!(u16, u32, u64, u128, usize);
impl_zeroize_with_default!(f32, f64, char, bool);
/// On non-nightly targets, avoid special-casing u8
#[cfg(not(feature = "nightly"))]
impl_zeroize_with_default!(u8);
/// On nightly targets, don't implement `DefaultIsZeroes` so we can special
/// case using batch set operations.
#[cfg(feature = "nightly")]
impl Zeroize for u8 {
fn zeroize(&mut self) {
volatile_set(self, 0);
atomic_fence();
}
}
impl<'a, Z> Zeroize for IterMut<'a, Z>
where
Z: DefaultIsZeroes,
{
fn zeroize(&mut self) {
let default = Z::default();
for elem in self {
volatile_set(elem, default);
}
atomic_fence();
}
}
/// Implement zeroize on all types that can be zeroized with the zero value
impl<Z> Zeroize for [Z]
where
Z: DefaultIsZeroes,
{
fn zeroize(&mut self) {
// TODO: batch volatile set operation?
self.iter_mut().zeroize();
}
}
/// On `nightly` Rust, `volatile_set_memory` provides fast byte slice zeroing
#[cfg(feature = "nightly")]
impl Zeroize for [u8] {
fn zeroize(&mut self) {
volatile_zero_bytes(self);
atomic_fence();
}
}
#[cfg(feature = "alloc")]
impl<Z> Zeroize for Vec<Z>
where
Z: DefaultIsZeroes,
{
fn zeroize(&mut self) {
self.resize(self.capacity(), Default::default());
self.as_mut_slice().zeroize();
self.clear();
}
}
#[cfg(feature = "alloc")]
impl Zeroize for String {
fn zeroize(&mut self) {
unsafe { self.as_bytes_mut() }.zeroize();
debug_assert!(self.as_bytes().iter().all(|b| *b == 0));
self.clear();
}
}
/// On `nightly` Rust, `volatile_set_memory` provides fast byte array zeroing
#[cfg(feature = "nightly")]
macro_rules! impl_zeroize_for_byte_array {
($($size:expr),+) => {
$(
impl Zeroize for [u8; $size] {
fn zeroize(&mut self) {
volatile_zero_bytes(self.as_mut());
atomic_fence();
}
}
)+
};
}
#[cfg(feature = "nightly")]
impl_zeroize_for_byte_array!(
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
);
/// Use fences to prevent accesses from being reordered before this
/// point, which should hopefully help ensure that all accessors
/// see zeroes after this point.
#[inline]
fn atomic_fence() | {
atomic::fence(atomic::Ordering::SeqCst);
atomic::compiler_fence(atomic::Ordering::SeqCst);
} | identifier_body |
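The volatile_set helper called by the impls above is referenced but not visible in this excerpt; the sketch below is an assumption about its general shape, illustrating the volatile-write-plus-fence pattern the crate documentation describes rather than the crate's actual implementation.

use core::{ptr, sync::atomic};

// Illustrative only: overwrite `dst` with a volatile store so the write cannot
// be elided, then fence so later accesses are not reordered ahead of it.
fn volatile_set_sketch<T: Copy>(dst: &mut T, value: T) {
    unsafe {
        ptr::write_volatile(dst, value);
    }
    atomic::fence(atomic::Ordering::SeqCst);
    atomic::compiler_fence(atomic::Ordering::SeqCst);
}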
|
fakes.rs | saved_networks: Mutex<HashMap<NetworkIdentifier, Vec<NetworkConfig>>>,
connections_recorded: Mutex<Vec<ConnectionRecord>>,
connect_results_recorded: Mutex<Vec<ConnectResultRecord>>,
lookup_compatible_response: Mutex<LookupCompatibleResponse>,
pub fail_all_stores: bool,
pub active_scan_result_recorded: Arc<Mutex<bool>>,
pub passive_scan_result_recorded: Arc<Mutex<bool>>,
pub past_connections_response: PastConnectionList,
}
#[derive(Debug, Clone, PartialEq)]
pub struct ConnectionRecord {
pub id: NetworkIdentifier,
pub credential: Credential,
pub data: PastConnectionData,
}
#[derive(Debug, Clone, PartialEq)]
pub struct ConnectResultRecord {
pub id: NetworkIdentifier,
pub credential: Credential,
pub bssid: client_types::Bssid,
pub connect_result: fidl_sme::ConnectResult,
pub scan_type: client_types::ScanObservation,
}
/// Use a struct so that the option can be updated from None to Some to allow the response to be
/// set after FakeSavedNetworksManager is created. Use an optional response value rather than
/// defaulting to an empty vector so that if the response is not set, lookup_compatible will panic
/// for easier debugging.
struct LookupCompatibleResponse {
inner: Option<Vec<NetworkConfig>>,
}
impl LookupCompatibleResponse {
fn new() -> Self {
LookupCompatibleResponse { inner: None }
}
}
impl FakeSavedNetworksManager {
pub fn new() -> Self {
Self {
saved_networks: Mutex::new(HashMap::new()),
connections_recorded: Mutex::new(vec![]),
connect_results_recorded: Mutex::new(vec![]),
fail_all_stores: false,
lookup_compatible_response: Mutex::new(LookupCompatibleResponse::new()),
active_scan_result_recorded: Arc::new(Mutex::new(false)),
passive_scan_result_recorded: Arc::new(Mutex::new(false)),
past_connections_response: PastConnectionList::new(),
}
}
/// Create FakeSavedNetworksManager, saving network configs with the specified
/// network identifiers and credentials at init.
pub fn new_with_saved_networks(network_configs: Vec<(NetworkIdentifier, Credential)>) -> Self {
let saved_networks = network_configs
.into_iter()
.filter_map(|(id, cred)| {
NetworkConfig::new(id.clone(), cred, false).ok().map(|config| (id, vec![config]))
})
.collect::<HashMap<NetworkIdentifier, Vec<NetworkConfig>>>();
Self {
saved_networks: Mutex::new(saved_networks),
connections_recorded: Mutex::new(vec![]),
connect_results_recorded: Mutex::new(vec![]),
fail_all_stores: false,
lookup_compatible_response: Mutex::new(LookupCompatibleResponse::new()),
active_scan_result_recorded: Arc::new(Mutex::new(false)),
passive_scan_result_recorded: Arc::new(Mutex::new(false)),
past_connections_response: PastConnectionList::new(),
}
}
/// Returns the past connections as they were recorded, rather than how they would have been
/// stored.
pub fn get_recorded_past_connections(&self) -> Vec<ConnectionRecord> |
pub fn get_recorded_connect_reslts(&self) -> Vec<ConnectResultRecord> {
self.connect_results_recorded
.try_lock()
.expect("expect locking self.connect_results_recorded to succeed")
.clone()
}
/// Manually change the hidden network probability of a saved network.
pub async fn update_hidden_prob(&self, id: NetworkIdentifier, hidden_prob: f32) {
let mut saved_networks = self.saved_networks.lock().await;
let networks = match saved_networks.get_mut(&id) {
Some(networks) => networks,
None => {
info!("Failed to find network to update");
return;
}
};
for network in networks.iter_mut() {
network.hidden_probability = hidden_prob;
}
}
pub fn set_lookup_compatible_response(&self, response: Vec<NetworkConfig>) {
self.lookup_compatible_response.try_lock().expect("failed to get lock").inner =
Some(response);
}
}
#[async_trait]
impl SavedNetworksManagerApi for FakeSavedNetworksManager {
async fn remove(
&self,
network_id: NetworkIdentifier,
credential: Credential,
) -> Result<bool, NetworkConfigError> {
let mut saved_networks = self.saved_networks.lock().await;
if let Some(network_configs) = saved_networks.get_mut(&network_id) {
let original_len = network_configs.len();
network_configs.retain(|cfg| cfg.credential != credential);
if original_len != network_configs.len() {
return Ok(true);
}
}
Ok(false)
}
async fn known_network_count(&self) -> usize {
unimplemented!()
}
async fn lookup(&self, id: &NetworkIdentifier) -> Vec<NetworkConfig> {
self.saved_networks.lock().await.get(id).cloned().unwrap_or_default()
}
async fn lookup_compatible(
&self,
ssid: &client_types::Ssid,
_scan_security: client_types::SecurityTypeDetailed,
) -> Vec<NetworkConfig> {
let predetermined_response = self.lookup_compatible_response.lock().await.inner.clone();
match predetermined_response {
Some(resp) => resp,
None => {
warn!("FakeSavedNetworksManager lookup_compatible response is not set, returning all networks with matching SSID");
self.saved_networks
.lock()
.await
.iter()
.filter_map(
|(id, config)| if id.ssid == *ssid { Some(config.clone()) } else { None },
)
.flatten()
.collect()
}
}
}
/// Note that the configs-per-NetworkIdentifier limit is set to 1 in
/// this mock struct. If a NetworkIdentifier is already stored, writing
/// a config to it will evict the previously store one.
async fn store(
&self,
network_id: NetworkIdentifier,
credential: Credential,
) -> Result<Option<NetworkConfig>, NetworkConfigError> {
if self.fail_all_stores {
return Err(NetworkConfigError::StashWriteError);
}
let config = NetworkConfig::new(network_id.clone(), credential, false)?;
return Ok(self
.saved_networks
.lock()
.await
.insert(network_id, vec![config])
.and_then(|mut v| v.pop()));
}
async fn record_connect_result(
&self,
id: NetworkIdentifier,
credential: &Credential,
bssid: client_types::Bssid,
connect_result: fidl_sme::ConnectResult,
scan_type: client_types::ScanObservation,
) {
self.connect_results_recorded.try_lock().expect("failed to record connect result").push(
ConnectResultRecord {
id: id.clone(),
credential: credential.clone(),
bssid,
connect_result,
scan_type,
},
);
}
async fn record_disconnect(
&self,
id: &NetworkIdentifier,
credential: &Credential,
data: PastConnectionData,
) {
let mut connections_recorded = self.connections_recorded.lock().await;
connections_recorded.push(ConnectionRecord {
id: id.clone(),
credential: credential.clone(),
data,
});
}
async fn record_periodic_metrics(&self) {}
async fn record_scan_result(
&self,
scan_type: ScanResultType,
_results: Vec<client_types::NetworkIdentifierDetailed>,
) {
match scan_type {
ScanResultType::Undirected => {
let mut v = self.passive_scan_result_recorded.lock().await;
*v = true;
}
ScanResultType::Directed(_) => {
let mut v = self.active_scan_result_recorded.lock().await;
*v = true
}
}
}
async fn get_networks(&self) -> Vec<NetworkConfig> {
self.saved_networks
.lock()
.await
.values()
.into_iter()
.flat_map(|cfgs| cfgs.clone())
.collect()
}
async fn get_past_connections(
&self,
_id: &NetworkIdentifier,
_credential: &Credential,
_bssid: &client_types::Bssid,
) -> PastConnectionList {
self.past_connections_response.clone()
}
}
pub fn create_wlan_hasher() -> WlanHasher {
WlanHasher::new(rand::thread_rng().gen::<u64>().to_le_bytes())
}
pub fn create_inspect_persistence_channel() -> (mpsc::Sender<String>, mpsc::Receiver<String>) {
const DEFAULT_BUFFER_SIZE: usize = 100; // arbitrary value
mpsc::channel(DEFAULT_BUFFER_SIZE)
}
/// Create past connection data with all random values. Tests can set the values they care about.
pub fn random_connection_data() -> PastConnectionData {
let mut rng = rand::thread_rng();
let connect_time = fasync::Time::from_nanos(rng.gen::<u16>().into());
let time_to_connect = zx::Duration::from_seconds(rng.gen_range::< | {
self.connections_recorded
.try_lock()
.expect("expect locking self.connections_recorded to succeed")
.clone()
} | identifier_body |
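A hedged sketch of how a test might drive the fake defined above, using only methods visible in this excerpt; the caller-supplied id, credential, and data values, and the async test harness that would invoke this, are assumed rather than shown.

async fn exercise_fake(
    id: NetworkIdentifier,
    credential: Credential,
    data: PastConnectionData,
) {
    let fake = FakeSavedNetworksManager::new();

    // Store a config and read it back through the SavedNetworksManagerApi methods.
    let evicted = fake.store(id.clone(), credential.clone()).await;
    assert!(matches!(evicted, Ok(None)), "first store should not evict anything");
    assert_eq!(fake.lookup(&id).await.len(), 1);

    // Disconnects are recorded verbatim so the test can assert on them later.
    fake.record_disconnect(&id, &credential, data.clone()).await;
    assert_eq!(
        fake.get_recorded_past_connections(),
        vec![ConnectionRecord { id, credential, data }]
    );
}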
fakes.rs | client_types::Bssid,
pub connect_result: fidl_sme::ConnectResult,
pub scan_type: client_types::ScanObservation,
}
/// Use a struct so that the option can be updated from None to Some to allow the response to be
/// set after FakeSavedNetworksManager is created. Use an optional response value rather than
/// defaulting to an empty vector so that if the response is not set, lookup_compatible will panic
/// for easier debugging.
struct LookupCompatibleResponse {
inner: Option<Vec<NetworkConfig>>,
}
impl LookupCompatibleResponse {
fn new() -> Self {
LookupCompatibleResponse { inner: None }
}
}
impl FakeSavedNetworksManager {
pub fn new() -> Self {
Self {
saved_networks: Mutex::new(HashMap::new()),
connections_recorded: Mutex::new(vec![]),
connect_results_recorded: Mutex::new(vec![]),
fail_all_stores: false,
lookup_compatible_response: Mutex::new(LookupCompatibleResponse::new()),
active_scan_result_recorded: Arc::new(Mutex::new(false)),
passive_scan_result_recorded: Arc::new(Mutex::new(false)),
past_connections_response: PastConnectionList::new(),
}
}
/// Create FakeSavedNetworksManager, saving network configs with the specified
/// network identifiers and credentials at init.
pub fn new_with_saved_networks(network_configs: Vec<(NetworkIdentifier, Credential)>) -> Self {
let saved_networks = network_configs
.into_iter()
.filter_map(|(id, cred)| {
NetworkConfig::new(id.clone(), cred, false).ok().map(|config| (id, vec![config]))
})
.collect::<HashMap<NetworkIdentifier, Vec<NetworkConfig>>>();
Self {
saved_networks: Mutex::new(saved_networks),
connections_recorded: Mutex::new(vec![]),
connect_results_recorded: Mutex::new(vec![]),
fail_all_stores: false,
lookup_compatible_response: Mutex::new(LookupCompatibleResponse::new()),
active_scan_result_recorded: Arc::new(Mutex::new(false)),
passive_scan_result_recorded: Arc::new(Mutex::new(false)),
past_connections_response: PastConnectionList::new(),
}
}
/// Returns the past connections as they were recorded, rather than how they would have been
/// stored.
pub fn get_recorded_past_connections(&self) -> Vec<ConnectionRecord> {
self.connections_recorded
.try_lock()
.expect("expect locking self.connections_recorded to succeed")
.clone()
}
pub fn get_recorded_connect_reslts(&self) -> Vec<ConnectResultRecord> {
self.connect_results_recorded
.try_lock()
.expect("expect locking self.connect_results_recorded to succeed")
.clone()
}
/// Manually change the hidden network probability of a saved network.
pub async fn update_hidden_prob(&self, id: NetworkIdentifier, hidden_prob: f32) {
let mut saved_networks = self.saved_networks.lock().await;
let networks = match saved_networks.get_mut(&id) {
Some(networks) => networks,
None => {
info!("Failed to find network to update");
return;
}
};
for network in networks.iter_mut() {
network.hidden_probability = hidden_prob;
}
}
pub fn set_lookup_compatible_response(&self, response: Vec<NetworkConfig>) {
self.lookup_compatible_response.try_lock().expect("failed to get lock").inner =
Some(response);
}
}
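// Illustrative test usage (a sketch; assumes a NetworkIdentifier `id`, Credential `cred`,
// and NetworkConfig `config` are built elsewhere in the test):
//     let saved_networks = FakeSavedNetworksManager::new_with_saved_networks(vec![(id, cred)]);
//     saved_networks.set_lookup_compatible_response(vec![config]);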
#[async_trait]
impl SavedNetworksManagerApi for FakeSavedNetworksManager {
async fn remove(
&self,
network_id: NetworkIdentifier,
credential: Credential,
) -> Result<bool, NetworkConfigError> {
let mut saved_networks = self.saved_networks.lock().await;
if let Some(network_configs) = saved_networks.get_mut(&network_id) {
let original_len = network_configs.len();
network_configs.retain(|cfg| cfg.credential != credential);
if original_len != network_configs.len() {
return Ok(true);
}
}
Ok(false)
}
async fn known_network_count(&self) -> usize {
unimplemented!()
}
async fn lookup(&self, id: &NetworkIdentifier) -> Vec<NetworkConfig> {
self.saved_networks.lock().await.get(id).cloned().unwrap_or_default()
}
async fn lookup_compatible(
&self,
ssid: &client_types::Ssid,
_scan_security: client_types::SecurityTypeDetailed,
) -> Vec<NetworkConfig> {
let predetermined_response = self.lookup_compatible_response.lock().await.inner.clone();
match predetermined_response {
Some(resp) => resp,
None => {
warn!("FakeSavedNetworksManager lookup_compatible response is not set, returning all networks with matching SSID");
self.saved_networks
.lock()
.await
.iter()
.filter_map(
|(id, config)| if id.ssid == *ssid { Some(config.clone()) } else { None },
)
.flatten()
.collect()
}
}
}
/// Note that the configs-per-NetworkIdentifier limit is set to 1 in
/// this mock struct. If a NetworkIdentifier is already stored, writing
/// a config to it will evict the previously stored one.
async fn store(
&self,
network_id: NetworkIdentifier,
credential: Credential,
) -> Result<Option<NetworkConfig>, NetworkConfigError> {
if self.fail_all_stores {
return Err(NetworkConfigError::StashWriteError);
}
let config = NetworkConfig::new(network_id.clone(), credential, false)?;
return Ok(self
.saved_networks
.lock()
.await
.insert(network_id, vec![config])
.and_then(|mut v| v.pop()));
}
async fn record_connect_result(
&self,
id: NetworkIdentifier,
credential: &Credential,
bssid: client_types::Bssid,
connect_result: fidl_sme::ConnectResult,
scan_type: client_types::ScanObservation,
) {
self.connect_results_recorded.try_lock().expect("failed to record connect result").push(
ConnectResultRecord {
id: id.clone(),
credential: credential.clone(),
bssid,
connect_result,
scan_type,
},
);
}
async fn record_disconnect(
&self,
id: &NetworkIdentifier,
credential: &Credential,
data: PastConnectionData,
) {
let mut connections_recorded = self.connections_recorded.lock().await;
connections_recorded.push(ConnectionRecord {
id: id.clone(),
credential: credential.clone(),
data,
});
}
async fn record_periodic_metrics(&self) {}
async fn record_scan_result(
&self,
scan_type: ScanResultType,
_results: Vec<client_types::NetworkIdentifierDetailed>,
) {
match scan_type {
ScanResultType::Undirected => {
let mut v = self.passive_scan_result_recorded.lock().await;
*v = true;
}
ScanResultType::Directed(_) => {
let mut v = self.active_scan_result_recorded.lock().await;
*v = true
}
}
}
async fn get_networks(&self) -> Vec<NetworkConfig> {
self.saved_networks
.lock()
.await
.values()
.into_iter()
.flat_map(|cfgs| cfgs.clone())
.collect()
}
async fn get_past_connections(
&self,
_id: &NetworkIdentifier,
_credential: &Credential,
_bssid: &client_types::Bssid,
) -> PastConnectionList {
self.past_connections_response.clone()
}
}
pub fn create_wlan_hasher() -> WlanHasher {
WlanHasher::new(rand::thread_rng().gen::<u64>().to_le_bytes())
}
pub fn create_inspect_persistence_channel() -> (mpsc::Sender<String>, mpsc::Receiver<String>) {
const DEFAULT_BUFFER_SIZE: usize = 100; // arbitrary value
mpsc::channel(DEFAULT_BUFFER_SIZE)
}
/// Create past connection data with all random values. Tests can set the values they care about.
pub fn random_connection_data() -> PastConnectionData {
let mut rng = rand::thread_rng();
let connect_time = fasync::Time::from_nanos(rng.gen::<u16>().into());
let time_to_connect = zx::Duration::from_seconds(rng.gen_range::<i64, _>(5..10));
let uptime = zx::Duration::from_seconds(rng.gen_range::<i64, _>(5..1000));
let disconnect_time = connect_time + time_to_connect + uptime;
PastConnectionData::new(
client_types::Bssid(
(0..6).map(|_| rng.gen::<u8>()).collect::<Vec<u8>>().try_into().unwrap(),
),
connect_time,
time_to_connect,
disconnect_time,
uptime,
client_types::DisconnectReason::DisconnectDetectedFromSme,
SignalData::new(rng.gen_range(-90..-20), rng.gen_range(10..50), 10), | rng.gen::<u8>().into(),
)
}
| random_line_split |
|
fakes.rs | {
saved_networks: Mutex<HashMap<NetworkIdentifier, Vec<NetworkConfig>>>,
connections_recorded: Mutex<Vec<ConnectionRecord>>,
connect_results_recorded: Mutex<Vec<ConnectResultRecord>>,
lookup_compatible_response: Mutex<LookupCompatibleResponse>,
pub fail_all_stores: bool,
pub active_scan_result_recorded: Arc<Mutex<bool>>,
pub passive_scan_result_recorded: Arc<Mutex<bool>>,
pub past_connections_response: PastConnectionList,
}
#[derive(Debug, Clone, PartialEq)]
pub struct ConnectionRecord {
pub id: NetworkIdentifier,
pub credential: Credential,
pub data: PastConnectionData,
}
#[derive(Debug, Clone, PartialEq)]
pub struct ConnectResultRecord {
pub id: NetworkIdentifier,
pub credential: Credential,
pub bssid: client_types::Bssid,
pub connect_result: fidl_sme::ConnectResult,
pub scan_type: client_types::ScanObservation,
}
/// Use a struct so that the option can be updated from None to Some to allow the response to be
/// set after FakeSavedNetworksManager is created. Use an optional response value rather than
/// defaulting to an empty vector so that if the response is not set, lookup_compatible will panic
/// for easier debugging.
struct LookupCompatibleResponse {
inner: Option<Vec<NetworkConfig>>,
}
impl LookupCompatibleResponse {
fn new() -> Self {
LookupCompatibleResponse { inner: None }
}
}
impl FakeSavedNetworksManager {
pub fn new() -> Self {
Self {
saved_networks: Mutex::new(HashMap::new()),
connections_recorded: Mutex::new(vec![]),
connect_results_recorded: Mutex::new(vec![]),
fail_all_stores: false,
lookup_compatible_response: Mutex::new(LookupCompatibleResponse::new()),
active_scan_result_recorded: Arc::new(Mutex::new(false)),
passive_scan_result_recorded: Arc::new(Mutex::new(false)),
past_connections_response: PastConnectionList::new(),
}
}
/// Create FakeSavedNetworksManager, saving network configs with the specified
/// network identifiers and credentials at init.
pub fn new_with_saved_networks(network_configs: Vec<(NetworkIdentifier, Credential)>) -> Self {
let saved_networks = network_configs
.into_iter()
.filter_map(|(id, cred)| {
NetworkConfig::new(id.clone(), cred, false).ok().map(|config| (id, vec![config]))
})
.collect::<HashMap<NetworkIdentifier, Vec<NetworkConfig>>>();
Self {
saved_networks: Mutex::new(saved_networks),
connections_recorded: Mutex::new(vec![]),
connect_results_recorded: Mutex::new(vec![]),
fail_all_stores: false,
lookup_compatible_response: Mutex::new(LookupCompatibleResponse::new()),
active_scan_result_recorded: Arc::new(Mutex::new(false)),
passive_scan_result_recorded: Arc::new(Mutex::new(false)),
past_connections_response: PastConnectionList::new(),
}
}
/// Returns the past connections as they were recorded, rather than how they would have been
/// stored.
pub fn get_recorded_past_connections(&self) -> Vec<ConnectionRecord> {
self.connections_recorded
.try_lock()
.expect("expect locking self.connections_recorded to succeed")
.clone()
}
pub fn get_recorded_connect_reslts(&self) -> Vec<ConnectResultRecord> {
self.connect_results_recorded
.try_lock()
.expect("expect locking self.connect_results_recorded to succeed")
.clone()
}
/// Manually change the hidden network probability of a saved network.
pub async fn update_hidden_prob(&self, id: NetworkIdentifier, hidden_prob: f32) {
let mut saved_networks = self.saved_networks.lock().await;
let networks = match saved_networks.get_mut(&id) {
Some(networks) => networks,
None => {
info!("Failed to find network to update");
return;
}
};
for network in networks.iter_mut() {
network.hidden_probability = hidden_prob;
}
}
pub fn set_lookup_compatible_response(&self, response: Vec<NetworkConfig>) {
self.lookup_compatible_response.try_lock().expect("failed to get lock").inner =
Some(response);
}
}
#[async_trait]
impl SavedNetworksManagerApi for FakeSavedNetworksManager {
async fn remove(
&self,
network_id: NetworkIdentifier,
credential: Credential,
) -> Result<bool, NetworkConfigError> {
let mut saved_networks = self.saved_networks.lock().await;
if let Some(network_configs) = saved_networks.get_mut(&network_id) {
let original_len = network_configs.len();
network_configs.retain(|cfg| cfg.credential != credential);
if original_len != network_configs.len() {
return Ok(true);
}
}
Ok(false)
}
async fn known_network_count(&self) -> usize {
unimplemented!()
}
async fn lookup(&self, id: &NetworkIdentifier) -> Vec<NetworkConfig> {
self.saved_networks.lock().await.get(id).cloned().unwrap_or_default()
}
async fn lookup_compatible(
&self,
ssid: &client_types::Ssid,
_scan_security: client_types::SecurityTypeDetailed,
) -> Vec<NetworkConfig> {
let predetermined_response = self.lookup_compatible_response.lock().await.inner.clone();
match predetermined_response {
Some(resp) => resp,
None => {
warn!("FakeSavedNetworksManager lookup_compatible response is not set, returning all networks with matching SSID");
self.saved_networks
.lock()
.await
.iter()
.filter_map(
|(id, config)| if id.ssid == *ssid { Some(config.clone()) } else { None },
)
.flatten()
.collect()
}
}
}
/// Note that the configs-per-NetworkIdentifier limit is set to 1 in
/// this mock struct. If a NetworkIdentifier is already stored, writing
/// a config to it will evict the previously stored one.
async fn store(
&self,
network_id: NetworkIdentifier,
credential: Credential,
) -> Result<Option<NetworkConfig>, NetworkConfigError> {
if self.fail_all_stores {
return Err(NetworkConfigError::StashWriteError);
}
let config = NetworkConfig::new(network_id.clone(), credential, false)?;
return Ok(self
.saved_networks
.lock()
.await
.insert(network_id, vec![config])
.and_then(|mut v| v.pop()));
}
async fn record_connect_result(
&self,
id: NetworkIdentifier,
credential: &Credential,
bssid: client_types::Bssid,
connect_result: fidl_sme::ConnectResult,
scan_type: client_types::ScanObservation,
) {
self.connect_results_recorded.try_lock().expect("failed to record connect result").push(
ConnectResultRecord {
id: id.clone(),
credential: credential.clone(),
bssid,
connect_result,
scan_type,
},
);
}
async fn record_disconnect(
&self,
id: &NetworkIdentifier,
credential: &Credential,
data: PastConnectionData,
) {
let mut connections_recorded = self.connections_recorded.lock().await;
connections_recorded.push(ConnectionRecord {
id: id.clone(),
credential: credential.clone(),
data,
});
}
async fn record_periodic_metrics(&self) {}
async fn record_scan_result(
&self,
scan_type: ScanResultType,
_results: Vec<client_types::NetworkIdentifierDetailed>,
) {
match scan_type {
ScanResultType::Undirected => {
let mut v = self.passive_scan_result_recorded.lock().await;
*v = true;
}
ScanResultType::Directed(_) => {
let mut v = self.active_scan_result_recorded.lock().await;
*v = true
}
}
}
async fn get_networks(&self) -> Vec<NetworkConfig> {
self.saved_networks
.lock()
.await
.values()
.into_iter()
.flat_map(|cfgs| cfgs.clone())
.collect()
}
async fn get_past_connections(
&self,
_id: &NetworkIdentifier,
_credential: &Credential,
_bssid: &client_types::Bssid,
) -> PastConnectionList {
self.past_connections_response.clone()
}
}
pub fn create_wlan_hasher() -> WlanHasher {
WlanHasher::new(rand::thread_rng().gen::<u64>().to_le_bytes())
}
pub fn | () -> (mpsc::Sender<String>, mpsc::Receiver<String>) {
const DEFAULT_BUFFER_SIZE: usize = 100; // arbitrary value
mpsc::channel(DEFAULT_BUFFER_SIZE)
}
/// Create past connection data with all random values. Tests can set the values they care about.
pub fn random_connection_data() -> PastConnectionData {
let mut rng = rand::thread_rng();
let connect_time = fasync::Time::from_nanos(rng.gen::<u16>().into());
let time_to_connect = zx::Duration::from_seconds(rng.gen_range::< | create_inspect_persistence_channel | identifier_name |
fakes.rs | {
saved_networks: Mutex<HashMap<NetworkIdentifier, Vec<NetworkConfig>>>,
connections_recorded: Mutex<Vec<ConnectionRecord>>,
connect_results_recorded: Mutex<Vec<ConnectResultRecord>>,
lookup_compatible_response: Mutex<LookupCompatibleResponse>,
pub fail_all_stores: bool,
pub active_scan_result_recorded: Arc<Mutex<bool>>,
pub passive_scan_result_recorded: Arc<Mutex<bool>>,
pub past_connections_response: PastConnectionList,
}
#[derive(Debug, Clone, PartialEq)]
pub struct ConnectionRecord {
pub id: NetworkIdentifier,
pub credential: Credential,
pub data: PastConnectionData,
}
#[derive(Debug, Clone, PartialEq)]
pub struct ConnectResultRecord {
pub id: NetworkIdentifier,
pub credential: Credential,
pub bssid: client_types::Bssid,
pub connect_result: fidl_sme::ConnectResult,
pub scan_type: client_types::ScanObservation,
}
/// Use a struct so that the option can be updated from None to Some to allow the response to be
/// set after FakeSavedNetworksManager is created. Use an optional response value rather than
/// defaulting to an empty vector so that if the response is not set, lookup_compatible will panic
/// for easier debugging.
struct LookupCompatibleResponse {
inner: Option<Vec<NetworkConfig>>,
}
impl LookupCompatibleResponse {
fn new() -> Self {
LookupCompatibleResponse { inner: None }
}
}
impl FakeSavedNetworksManager {
pub fn new() -> Self {
Self {
saved_networks: Mutex::new(HashMap::new()),
connections_recorded: Mutex::new(vec![]),
connect_results_recorded: Mutex::new(vec![]),
fail_all_stores: false,
lookup_compatible_response: Mutex::new(LookupCompatibleResponse::new()),
active_scan_result_recorded: Arc::new(Mutex::new(false)),
passive_scan_result_recorded: Arc::new(Mutex::new(false)),
past_connections_response: PastConnectionList::new(),
}
}
/// Create FakeSavedNetworksManager, saving network configs with the specified
/// network identifiers and credentials at init.
pub fn new_with_saved_networks(network_configs: Vec<(NetworkIdentifier, Credential)>) -> Self {
let saved_networks = network_configs
.into_iter()
.filter_map(|(id, cred)| {
NetworkConfig::new(id.clone(), cred, false).ok().map(|config| (id, vec![config]))
})
.collect::<HashMap<NetworkIdentifier, Vec<NetworkConfig>>>();
Self {
saved_networks: Mutex::new(saved_networks),
connections_recorded: Mutex::new(vec![]),
connect_results_recorded: Mutex::new(vec![]),
fail_all_stores: false,
lookup_compatible_response: Mutex::new(LookupCompatibleResponse::new()),
active_scan_result_recorded: Arc::new(Mutex::new(false)),
passive_scan_result_recorded: Arc::new(Mutex::new(false)),
past_connections_response: PastConnectionList::new(),
}
}
/// Returns the past connections as they were recorded, rather than how they would have been
/// stored.
pub fn get_recorded_past_connections(&self) -> Vec<ConnectionRecord> {
self.connections_recorded
.try_lock()
.expect("expect locking self.connections_recorded to succeed")
.clone()
}
pub fn get_recorded_connect_reslts(&self) -> Vec<ConnectResultRecord> {
self.connect_results_recorded
.try_lock()
.expect("expect locking self.connect_results_recorded to succeed")
.clone()
}
/// Manually change the hidden network probability of a saved network.
pub async fn update_hidden_prob(&self, id: NetworkIdentifier, hidden_prob: f32) {
let mut saved_networks = self.saved_networks.lock().await;
let networks = match saved_networks.get_mut(&id) {
Some(networks) => networks,
None => {
info!("Failed to find network to update");
return;
}
};
for network in networks.iter_mut() {
network.hidden_probability = hidden_prob;
}
}
pub fn set_lookup_compatible_response(&self, response: Vec<NetworkConfig>) {
self.lookup_compatible_response.try_lock().expect("failed to get lock").inner =
Some(response);
}
}
#[async_trait]
impl SavedNetworksManagerApi for FakeSavedNetworksManager {
async fn remove(
&self,
network_id: NetworkIdentifier,
credential: Credential,
) -> Result<bool, NetworkConfigError> {
let mut saved_networks = self.saved_networks.lock().await;
if let Some(network_configs) = saved_networks.get_mut(&network_id) {
let original_len = network_configs.len();
network_configs.retain(|cfg| cfg.credential != credential);
if original_len != network_configs.len() {
return Ok(true);
}
}
Ok(false)
}
async fn known_network_count(&self) -> usize {
unimplemented!()
}
async fn lookup(&self, id: &NetworkIdentifier) -> Vec<NetworkConfig> {
self.saved_networks.lock().await.get(id).cloned().unwrap_or_default()
}
async fn lookup_compatible(
&self,
ssid: &client_types::Ssid,
_scan_security: client_types::SecurityTypeDetailed,
) -> Vec<NetworkConfig> {
let predetermined_response = self.lookup_compatible_response.lock().await.inner.clone();
match predetermined_response {
Some(resp) => resp,
None => {
warn!("FakeSavedNetworksManager lookup_compatible response is not set, returning all networks with matching SSID");
self.saved_networks
.lock()
.await
.iter()
.filter_map(
|(id, config)| if id.ssid == *ssid | else { None },
)
.flatten()
.collect()
}
}
}
/// Note that the configs-per-NetworkIdentifier limit is set to 1 in
/// this mock struct. If a NetworkIdentifier is already stored, writing
/// a config to it will evict the previously stored one.
async fn store(
&self,
network_id: NetworkIdentifier,
credential: Credential,
) -> Result<Option<NetworkConfig>, NetworkConfigError> {
if self.fail_all_stores {
return Err(NetworkConfigError::StashWriteError);
}
let config = NetworkConfig::new(network_id.clone(), credential, false)?;
return Ok(self
.saved_networks
.lock()
.await
.insert(network_id, vec![config])
.and_then(|mut v| v.pop()));
}
async fn record_connect_result(
&self,
id: NetworkIdentifier,
credential: &Credential,
bssid: client_types::Bssid,
connect_result: fidl_sme::ConnectResult,
scan_type: client_types::ScanObservation,
) {
self.connect_results_recorded.try_lock().expect("failed to record connect result").push(
ConnectResultRecord {
id: id.clone(),
credential: credential.clone(),
bssid,
connect_result,
scan_type,
},
);
}
async fn record_disconnect(
&self,
id: &NetworkIdentifier,
credential: &Credential,
data: PastConnectionData,
) {
let mut connections_recorded = self.connections_recorded.lock().await;
connections_recorded.push(ConnectionRecord {
id: id.clone(),
credential: credential.clone(),
data,
});
}
async fn record_periodic_metrics(&self) {}
async fn record_scan_result(
&self,
scan_type: ScanResultType,
_results: Vec<client_types::NetworkIdentifierDetailed>,
) {
match scan_type {
ScanResultType::Undirected => {
let mut v = self.passive_scan_result_recorded.lock().await;
*v = true;
}
ScanResultType::Directed(_) => {
let mut v = self.active_scan_result_recorded.lock().await;
*v = true
}
}
}
async fn get_networks(&self) -> Vec<NetworkConfig> {
self.saved_networks
.lock()
.await
.values()
.into_iter()
.flat_map(|cfgs| cfgs.clone())
.collect()
}
async fn get_past_connections(
&self,
_id: &NetworkIdentifier,
_credential: &Credential,
_bssid: &client_types::Bssid,
) -> PastConnectionList {
self.past_connections_response.clone()
}
}
pub fn create_wlan_hasher() -> WlanHasher {
WlanHasher::new(rand::thread_rng().gen::<u64>().to_le_bytes())
}
pub fn create_inspect_persistence_channel() -> (mpsc::Sender<String>, mpsc::Receiver<String>) {
const DEFAULT_BUFFER_SIZE: usize = 100; // arbitrary value
mpsc::channel(DEFAULT_BUFFER_SIZE)
}
/// Create past connection data with all random values. Tests can set the values they care about.
pub fn random_connection_data() -> PastConnectionData {
let mut rng = rand::thread_rng();
let connect_time = fasync::Time::from_nanos(rng.gen::<u16>().into());
let time_to_connect = zx::Duration::from_seconds(rng.gen_range::< | { Some(config.clone()) } | conditional_block |
yahoo_plus_save.py | (self,logfile_name,dbpath):
dbpath='''file:{}?mode=rw'''.format(dbpath)
self.conn = sqlite3.connect(dbpath,uri=True)
self.c = self.conn.cursor()
self.logfile_name="yahoo_plus.log"
self.requests_cnt=0
self.https_proxy=proxies[0]
@staticmethod
def convert_tf(d):
if d is True:
return 1
else:
return 0
@staticmethod
def _curtime():
return time.strftime("%d/%m/%Y %H:%M:%S")
def printtext(self,string,wt=False): #wt =withtime
'''Print text to screen and log file'''
if wt is True:
str1="{0} : {1}".format(self._curtime(),string)
try:
print(str1)
except UnicodeEncodeError:
print("{0} : UnicodeEncodeError, String Contains Non-BMP character".format(self._curtime()))
#print("{0} : {1}".format(time.strftime("%d/%m/%Y %H:%M:%S"),string),file= open(self.logfile_name, "a"))
print(str1,file= open(self.logfile_name, "a",encoding="utf8"))
else:
try:
print(string)
except UnicodeEncodeError:
print("{0} : UnicodeEncodeError, String Contains Non-BMP character".format(self._curtime()))
print(string,file= open(self.logfile_name, "a",encoding="utf8"))
def printtolog(self,string,wt=False): #wt =withtime
'''Print text to log file only'''
if wt is True:
print("{0} : {1}".format(time.strftime("%d/%m/%Y %H:%M:%S"),string),file= open(self.logfile_name, "a",encoding="utf8"))
else:
print(string,file= open(self.logfile_name, "a",encoding="utf8"))
def fetchdata_nomapping(self, sql,arg=None):
'''Get data from database, return a list without column name'''
if arg is None:
self.c.execute(sql)
else:
self.c.execute(sql,arg)
b=self.c.fetchall()
return b
def parse_file(self,file):
a_file = open(file, "r",encoding="utf8")
list_of_lists = []
for line in a_file:
stripped_line = line.strip()
if stripped_line[0] =="#": #skip with sharp symbol
continue
if len(stripped_line)==0:
continue
line_list = stripped_line.split(",")
list_of_lists.append(line_list)
a_file.close()
return list_of_lists
def remove_preferences_page(self,soup):
self.printtext("I hate this!!!",wt=True)
calcal=soup.find(id='pref')
calcal=calcal.find(class_='left')
calcal=calcal.find("a").get("href")
#calcal=
return calcal
def new_request(self,url):
headers={"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36"}
print("Current proxies",self.https_proxy,"used for",self.requests_cnt,"times")
if self.requests_cnt>22000:
self.https_proxy=next(proxy_pool)
self.printtext("Using proxy {}".format(self.https_proxy),wt=True)
self.requests_cnt=0
while True:
try:
proxyDict = {"http" : self.https_proxy, "https" : self.https_proxy}
response = requests.get(url,allow_redirects=True,proxies=proxyDict)
self.requests_cnt+=1
break
except TimeoutError:
self.printtext("TimeoutError: Connection Timeout",wt=True)
time.sleep(10)
except requests.exceptions.ProxyError as err:
#OSError: Tunnel connection failed: 504 Couldn't connect: Connection refused
self.printtext(("Proxy Error:", err),wt=True)
self.printtext("Proxy {} used for {} times".format(self.https_proxy,self.requests_cnt),wt=True)
time.sleep(10)
self.https_proxy=next(proxy_pool)
self.printtext("Using next proxy:{}".format(self.https_proxy),wt=True)
self.requests_cnt=0
except requests.exceptions.SSLError as err:
self.printtext(("SSL Error:", err),wt=True)
self.https_proxy=next(proxy_pool)
self.printtext("Using next proxy:{}".format(self.https_proxy),wt=True)
self.requests_cnt=0
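## A response object exists once the retry loop above exits: a 404 is logged and returns
## None, and any other non-200 status is logged, appended to yahoo_error_url.txt, and
## also returns None instead of parsed soup.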
if response.status_code == 404:
self.printtext("Error 404 for: {}".format(url),wt=True)
return None
if response.status_code != 200:
self.printtext("HTTP Error {} ".format(response.status_code),wt=True)
print(url,file= open("yahoo_error_url.txt", "a",encoding="utf8"))
return None
#response.raise_for_status()
for resp in response.history:
print(resp.status_code, resp.url)
soup = BeautifulSoup(response.text, 'html.parser')
return soup
def insert_category_data(self,data):
data=data["itemListElement"]
flevel=data[0]["item"]
cat_url00=re.split('\/|\?|\=|\&',flevel)
cat_id00=cat_url00[6]
for i in data:
cat_url=re.split('\/|\?|\=|\&',i["item"])
cat_id=cat_url[6]
#print(cat_id)
level=i["position"]
if level ==1:
self.c.execute('''INSERT OR IGNORE INTO category (catid,level,catname) VALUES(?,?,?)''',(cat_id,level,i["name"]))
else:
self.c.execute('''INSERT OR IGNORE INTO category (catid,level,cat_parentid,catname) VALUES(?,?,?,?)''',(cat_id,level,cat_id00,i["name"]))
self.conn.commit()
return data[-1]["item"]
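## insert_category_data() above returns the item URL of the last (deepest) breadcrumb
## entry in the JSON-LD itemListElement it was given.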
def insert_data(self,oldqid,newqid,cat_id,data,user_url):
try:
data=data["mainEntity"]
except KeyError:
return
title=data["name"]
content=data["text"]
ansc=data["answerCount"]
date=data["dateCreated"]
author_t=data["author"]["@type"]
author_n=data["author"]["name"]
if not oldqid:
self.printtext("Insert {}, question:{}".format(newqid,title),wt=True)
else:
self.printtext("Insert {}/{}, question:{}".format(newqid,oldqid,title),wt=True)
self.c.execute('''INSERT OR REPLACE INTO question (newqid,oldqid,category_id,title,content,answercount,datecreated,author_type,author_name,author_link) VALUES(?,?,?,?,?,?,?,?,?,?)''',(newqid,oldqid,cat_id,title,content,ansc,date,author_t,author_n,user_url[0]))
user_urlpos=1
if "acceptedAnswer" in data:
data2=data["acceptedAnswer"]
content2=data2["text"]
date2=data2["dateCreated"]
author_t2=data2["author"]["@type"]
author_n2=data2["author"]["name"]
upvote_c2=data2["upvoteCount"]
rows2=self.fetchdata_nomapping("SELECT 1 FROM answers WHERE answer=? AND author_name=? AND datecreated=? LIMIT 1",(content2,author_n2,date2))
if len(rows2)==1:
pass
else:
self.c.execute('''INSERT OR IGNORE INTO answers (question_id,is_accepted,answer,datecreated,author_type,author_name,author_link,upvotecount) VALUES(?,?,?,?,?,?,?,?)''',(newqid,1,content2,date2,author_t2,author_n2,user_url[user_urlpos],upvote_c2))
user_urlpos+=1;
if data["suggestedAnswer"]:
for i in data["suggestedAnswer"]:
content2=i["text"]
date2=i["dateCreated"]
author_t2=i["author"]["@type"]
author_n2=i["author"]["name"]
upvote_c2=i["upvoteCount"]
rows2=self.fetchdata_nomapping("SELECT 1 FROM answers WHERE answer=? AND author_name=? AND datecreated=? LIMIT 1",(content2,author_n2,date2))
if len(rows2)==1:
user_urlpos+=1;
continue
self.c.execute('''INSERT OR IGNORE INTO answers (question_id,is_accepted,answer,datecreated,author_type,author_name,author_link,upvotecount) VALUES (?,?,?,?,?,?,?,?)''',(newqid,0,content2,date2,author_t2,author_n2,user_url[user_urlpos],upvote_c2))
user_urlpos+=1;
self.conn.commit()
self.printtext("Answer Count: {}".format(ansc),wt=True)
#self.printtext("Insert {} completed".format(newqid),wt=True)
return
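## In insert_data() above, answers are de-duplicated on (text, author, date); when a
## duplicate is found only user_urlpos is advanced, keeping the author links aligned
## with the answers that are actually inserted.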
def parsing_area(self,link):
#https://hk.answers.yahoo.com/question/index?q | __init__ | identifier_name |
|
yahoo_plus_save.py | H:%M:%S"),string),file= open(self.logfile_name, "a"))
print(str1,file= open(self.logfile_name, "a",encoding="utf8"))
else:
try:
print(string)
except UnicodeEncodeError:
print("{0} : UnicodeEncodeError, String Contains Non-BMP character".format(self._curtime()))
print(string,file= open(self.logfile_name, "a",encoding="utf8"))
def printtolog(self,string,wt=False): #wt =withtime
'''Print text to log file only'''
if wt is True:
print("{0} : {1}".format(time.strftime("%d/%m/%Y %H:%M:%S"),string),file= open(self.logfile_name, "a",encoding="utf8"))
else:
print(string,file= open(self.logfile_name, "a",encoding="utf8"))
def fetchdata_nomapping(self, sql,arg=None):
|
def parse_file(self,file):
a_file = open(file, "r",encoding="utf8")
list_of_lists = []
for line in a_file:
stripped_line = line.strip()
if stripped_line[0] =="#": #skip with sharp symbol
continue
if len(stripped_line)==0:
continue
line_list = stripped_line.split(",")
list_of_lists.append(line_list)
a_file.close()
return list_of_lists
def remove_preferences_page(self,soup):
self.printtext("I hate this!!!",wt=True)
calcal=soup.find(id='pref')
calcal=calcal.find(class_='left')
calcal=calcal.find("a").get("href")
#calcal=
return calcal
def new_request(self,url):
headers={"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36"}
print("Current proxies",self.https_proxy,"used for",self.requests_cnt,"times")
if self.requests_cnt>22000:
self.https_proxy=next(proxy_pool)
self.printtext("Using proxy {}".format(self.https_proxy),wt=True)
self.requests_cnt=0
while True:
try:
proxyDict = {"http" : self.https_proxy, "https" : self.https_proxy}
response = requests.get(url,allow_redirects=True,proxies=proxyDict)
self.requests_cnt+=1
break
except TimeoutError:
self.printtext("TimeoutError: Connection Timeout",wt=True)
time.sleep(10)
except requests.exceptions.ProxyError as err:
#OSError: Tunnel connection failed: 504 Couldn't connect: Connection refused
self.printtext(("Proxy Error:", err),wt=True)
self.printtext("Proxy {} used for {} times".format(self.https_proxy,self.requests_cnt),wt=True)
time.sleep(10)
self.https_proxy=next(proxy_pool)
self.printtext("Using next proxy:{}".format(self.https_proxy),wt=True)
self.requests_cnt=0
except requests.exceptions.SSLError as err:
self.printtext(("SSL Error:", err),wt=True)
self.https_proxy=next(proxy_pool)
self.printtext("Using next proxy:{}".format(self.https_proxy),wt=True)
self.requests_cnt=0
if response.status_code == 404:
self.printtext("Error 404 for: {}".format(url),wt=True)
return None
if response.status_code != 200:
self.printtext("HTTP Error {} ".format(response.status_code),wt=True)
print(url,file= open("yahoo_error_url.txt", "a",encoding="utf8"))
return None
#response.raise_for_status()
for resp in response.history:
print(resp.status_code, resp.url)
soup = BeautifulSoup(response.text, 'html.parser')
return soup
def insert_category_data(self,data):
data=data["itemListElement"]
flevel=data[0]["item"]
cat_url00=re.split('\/|\?|\=|\&',flevel)
cat_id00=cat_url00[6]
for i in data:
cat_url=re.split('\/|\?|\=|\&',i["item"])
cat_id=cat_url[6]
#print(cat_id)
level=i["position"]
if level ==1:
self.c.execute('''INSERT OR IGNORE INTO category (catid,level,catname) VALUES(?,?,?)''',(cat_id,level,i["name"]))
else:
self.c.execute('''INSERT OR IGNORE INTO category (catid,level,cat_parentid,catname) VALUES(?,?,?,?)''',(cat_id,level,cat_id00,i["name"]))
self.conn.commit()
return data[-1]["item"]
def insert_data(self,oldqid,newqid,cat_id,data,user_url):
try:
data=data["mainEntity"]
except KeyError:
return
title=data["name"]
content=data["text"]
ansc=data["answerCount"]
date=data["dateCreated"]
author_t=data["author"]["@type"]
author_n=data["author"]["name"]
if not oldqid:
self.printtext("Insert {}, question:{}".format(newqid,title),wt=True)
else:
self.printtext("Insert {}/{}, question:{}".format(newqid,oldqid,title),wt=True)
self.c.execute('''INSERT OR REPLACE INTO question (newqid,oldqid,category_id,title,content,answercount,datecreated,author_type,author_name,author_link) VALUES(?,?,?,?,?,?,?,?,?,?)''',(newqid,oldqid,cat_id,title,content,ansc,date,author_t,author_n,user_url[0]))
user_urlpos=1
if "acceptedAnswer" in data:
data2=data["acceptedAnswer"]
content2=data2["text"]
date2=data2["dateCreated"]
author_t2=data2["author"]["@type"]
author_n2=data2["author"]["name"]
upvote_c2=data2["upvoteCount"]
rows2=self.fetchdata_nomapping("SELECT 1 FROM answers WHERE answer=? AND author_name=? AND datecreated=? LIMIT 1",(content2,author_n2,date2))
if len(rows2)==1:
pass
else:
self.c.execute('''INSERT OR IGNORE INTO answers (question_id,is_accepted,answer,datecreated,author_type,author_name,author_link,upvotecount) VALUES(?,?,?,?,?,?,?,?)''',(newqid,1,content2,date2,author_t2,author_n2,user_url[user_urlpos],upvote_c2))
user_urlpos+=1;
if data["suggestedAnswer"]:
for i in data["suggestedAnswer"]:
content2=i["text"]
date2=i["dateCreated"]
author_t2=i["author"]["@type"]
author_n2=i["author"]["name"]
upvote_c2=i["upvoteCount"]
rows2=self.fetchdata_nomapping("SELECT 1 FROM answers WHERE answer=? AND author_name=? AND datecreated=? LIMIT 1",(content2,author_n2,date2))
if len(rows2)==1:
user_urlpos+=1;
continue
self.c.execute('''INSERT OR IGNORE INTO answers (question_id,is_accepted,answer,datecreated,author_type,author_name,author_link,upvotecount) VALUES (?,?,?,?,?,?,?,?)''',(newqid,0,content2,date2,author_t2,author_n2,user_url[user_urlpos],upvote_c2))
user_urlpos+=1;
self.conn.commit()
self.printtext("Answer Count: {}".format(ansc),wt=True)
#self.printtext("Insert {} completed".format(newqid),wt=True)
return
def parsing_area(self,link):
#https://hk.answers.yahoo.com/question/index?qid=20210405072002AAPcNek
self.printtext(link,wt=True)
spilited_url=re.split('\/|\?|\=|\&',link)
questionid=spilited_url[6]
rows2=self.fetchdata_nomapping("SELECT 1 FROM question WHERE newqid=? LIMIT 1",(questionid,))
rows3=self.fetchdata_nomapping("SELECT 1 FROM question WHERE oldqid=? LIMIT 1",(questionid,))
if len(rows2)>0 or len(rows3)>0:
self.printtext("Already fetched, skip request",wt=True)
return
soup=self.new_request(link)
if soup is None:
return
#print response into a html
#print(soup.prettify(),file= open(questionid+".html", "w",encoding="utf8"))
script = soup.find_all('script', type=["application/ld+json"])
new_qid=soup.find("meta", property="og:url")
#print(new_qid["content"] if new_qid else "No | '''Get data from database, return a list without column name'''
if arg is None:
self.c.execute(sql)
else:
self.c.execute(sql,arg)
b=self.c.fetchall()
return b | identifier_body |
yahoo_plus_save.py | H:%M:%S"),string),file= open(self.logfile_name, "a"))
print(str1,file= open(self.logfile_name, "a",encoding="utf8"))
else:
try:
print(string)
except UnicodeEncodeError:
print("{0} : UnicodeEncodeError, String Contains Non-BMP character".format(self._curtime()))
print(string,file= open(self.logfile_name, "a",encoding="utf8"))
def printtolog(self,string,wt=False): #wt =withtime
'''Print text to log file only'''
if wt is True:
print("{0} : {1}".format(time.strftime("%d/%m/%Y %H:%M:%S"),string),file= open(self.logfile_name, "a",encoding="utf8"))
else:
print(string,file= open(self.logfile_name, "a",encoding="utf8"))
def fetchdata_nomapping(self, sql,arg=None):
'''Get data from database, return a list without column name'''
if arg is None:
self.c.execute(sql)
else:
self.c.execute(sql,arg)
b=self.c.fetchall()
return b
def parse_file(self,file):
a_file = open(file, "r",encoding="utf8")
list_of_lists = []
for line in a_file:
stripped_line = line.strip()
if stripped_line[0] =="#": #skip with sharp symbol
continue
if len(stripped_line)==0:
continue
line_list = stripped_line.split(",")
list_of_lists.append(line_list)
a_file.close()
return list_of_lists
def remove_preferences_page(self,soup):
self.printtext("I hate this!!!",wt=True)
calcal=soup.find(id='pref')
calcal=calcal.find(class_='left')
calcal=calcal.find("a").get("href")
#calcal=
return calcal
def new_request(self,url):
headers={"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36"}
print("Current proxies",self.https_proxy,"used for",self.requests_cnt,"times")
if self.requests_cnt>22000:
self.https_proxy=next(proxy_pool)
self.printtext("Using proxy {}".format(self.https_proxy),wt=True)
self.requests_cnt=0
while True:
try:
proxyDict = {"http" : self.https_proxy, "https" : self.https_proxy}
response = requests.get(url,allow_redirects=True,proxies=proxyDict)
self.requests_cnt+=1
break
except TimeoutError:
self.printtext("TimeoutError: Connection Timeout",wt=True)
time.sleep(10)
except requests.exceptions.ProxyError as err:
#OSError: Tunnel connection failed: 504 Couldn't connect: Connection refused
self.printtext(("Proxy Error:", err),wt=True)
self.printtext("Proxy {} used for {} times".format(self.https_proxy,self.requests_cnt),wt=True)
time.sleep(10)
self.https_proxy=next(proxy_pool)
self.printtext("Using next proxy:{}".format(self.https_proxy),wt=True)
self.requests_cnt=0
except requests.exceptions.SSLError as err:
self.printtext(("SSL Error:", err),wt=True)
self.https_proxy=next(proxy_pool)
self.printtext("Using next proxy:{}".format(self.https_proxy),wt=True)
self.requests_cnt=0
if response.status_code == 404:
self.printtext("Error 404 for: {}".format(url),wt=True)
return None
if response.status_code != 200:
self.printtext("HTTP Error {} ".format(response.status_code),wt=True)
print(url,file= open("yahoo_error_url.txt", "a",encoding="utf8"))
return None
#response.raise_for_status()
for resp in response.history:
print(resp.status_code, resp.url)
soup = BeautifulSoup(response.text, 'html.parser')
return soup
def insert_category_data(self,data):
data=data["itemListElement"]
flevel=data[0]["item"]
cat_url00=re.split('\/|\?|\=|\&',flevel)
cat_id00=cat_url00[6]
for i in data:
cat_url=re.split('\/|\?|\=|\&',i["item"])
cat_id=cat_url[6]
#print(cat_id)
level=i["position"]
if level ==1:
self.c.execute('''INSERT OR IGNORE INTO category (catid,level,catname) VALUES(?,?,?)''',(cat_id,level,i["name"]))
else:
self.c.execute('''INSERT OR IGNORE INTO category (catid,level,cat_parentid,catname) VALUES(?,?,?,?)''',(cat_id,level,cat_id00,i["name"]))
self.conn.commit()
return data[-1]["item"]
def insert_data(self,oldqid,newqid,cat_id,data,user_url):
try:
data=data["mainEntity"]
except KeyError:
return
title=data["name"]
content=data["text"]
ansc=data["answerCount"]
date=data["dateCreated"]
author_t=data["author"]["@type"]
author_n=data["author"]["name"]
if not oldqid:
self.printtext("Insert {}, question:{}".format(newqid,title),wt=True)
else:
self.printtext("Insert {}/{}, question:{}".format(newqid,oldqid,title),wt=True)
self.c.execute('''INSERT OR REPLACE INTO question (newqid,oldqid,category_id,title,content,answercount,datecreated,author_type,author_name,author_link) VALUES(?,?,?,?,?,?,?,?,?,?)''',(newqid,oldqid,cat_id,title,content,ansc,date,author_t,author_n,user_url[0]))
user_urlpos=1
if "acceptedAnswer" in data:
data2=data["acceptedAnswer"]
content2=data2["text"]
date2=data2["dateCreated"]
author_t2=data2["author"]["@type"]
author_n2=data2["author"]["name"]
upvote_c2=data2["upvoteCount"]
rows2=self.fetchdata_nomapping("SELECT 1 FROM answers WHERE answer=? AND author_name=? AND datecreated=? LIMIT 1",(content2,author_n2,date2))
if len(rows2)==1:
|
else:
self.c.execute('''INSERT OR IGNORE INTO answers (question_id,is_accepted,answer,datecreated,author_type,author_name,author_link,upvotecount) VALUES(?,?,?,?,?,?,?,?)''',(newqid,1,content2,date2,author_t2,author_n2,user_url[user_urlpos],upvote_c2))
user_urlpos+=1;
if data["suggestedAnswer"]:
for i in data["suggestedAnswer"]:
content2=i["text"]
date2=i["dateCreated"]
author_t2=i["author"]["@type"]
author_n2=i["author"]["name"]
upvote_c2=i["upvoteCount"]
rows2=self.fetchdata_nomapping("SELECT 1 FROM answers WHERE answer=? AND author_name=? AND datecreated=? LIMIT 1",(content2,author_n2,date2))
if len(rows2)==1:
user_urlpos+=1;
continue
self.c.execute('''INSERT OR IGNORE INTO answers (question_id,is_accepted,answer,datecreated,author_type,author_name,author_link,upvotecount) VALUES (?,?,?,?,?,?,?,?)''',(newqid,0,content2,date2,author_t2,author_n2,user_url[user_urlpos],upvote_c2))
user_urlpos+=1;
self.conn.commit()
self.printtext("Answer Count: {}".format(ansc),wt=True)
#self.printtext("Insert {} completed".format(newqid),wt=True)
return
def parsing_area(self,link):
#https://hk.answers.yahoo.com/question/index?qid=20210405072002AAPcNek
self.printtext(link,wt=True)
spilited_url=re.split('\/|\?|\=|\&',link)
questionid=spilited_url[6]
rows2=self.fetchdata_nomapping("SELECT 1 FROM question WHERE newqid=? LIMIT 1",(questionid,))
rows3=self.fetchdata_nomapping("SELECT 1 FROM question WHERE oldqid=? LIMIT 1",(questionid,))
if len(rows2)>0 or len(rows3)>0:
self.printtext("Already fetched, skip request",wt=True)
return
soup=self.new_request(link)
if soup is None:
return
#print response into a html
#print(soup.prettify(),file= open(questionid+".html", "w",encoding="utf8"))
script = soup.find_all('script', type=["application/ld+json"])
new_qid=soup.find("meta", property="og:url")
#print(new_qid["content"] if new_qid else "No | pass | conditional_block |
yahoo_plus_save.py | except UnicodeEncodeError:
print("{0} : UnicodeEncodeError, String Contains Non-BMP character".format(self._curtime()))
#print("{0} : {1}".format(time.strftime("%d/%m/%Y %H:%M:%S"),string),file= open(self.logfile_name, "a"))
print(str1,file= open(self.logfile_name, "a",encoding="utf8"))
else:
try:
print(string)
except UnicodeEncodeError:
print("{0} : UnicodeEncodeError, String Contains Non-BMP character".format(self._curtime()))
print(string,file= open(self.logfile_name, "a",encoding="utf8"))
def printtolog(self,string,wt=False): #wt =withtime
'''Print text to log file only'''
if wt is True:
print("{0} : {1}".format(time.strftime("%d/%m/%Y %H:%M:%S"),string),file= open(self.logfile_name, "a",encoding="utf8"))
else:
print(string,file= open(self.logfile_name, "a",encoding="utf8"))
def fetchdata_nomapping(self, sql,arg=None):
'''Get data from database, return a list without column name'''
if arg is None:
self.c.execute(sql)
else:
self.c.execute(sql,arg)
b=self.c.fetchall()
return b
def parse_file(self,file):
a_file = open(file, "r",encoding="utf8")
list_of_lists = []
for line in a_file:
stripped_line = line.strip()
if stripped_line[0] =="#": #skip with sharp symbol
continue
if len(stripped_line)==0:
continue
line_list = stripped_line.split(",")
list_of_lists.append(line_list)
a_file.close()
return list_of_lists
def remove_preferences_page(self,soup):
self.printtext("I hate this!!!",wt=True)
calcal=soup.find(id='pref')
calcal=calcal.find(class_='left')
calcal=calcal.find("a").get("href")
#calcal=
return calcal
def new_request(self,url):
headers={"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36"}
print("Current proxies",self.https_proxy,"used for",self.requests_cnt,"times")
if self.requests_cnt>22000:
self.https_proxy=next(proxy_pool)
self.printtext("Using proxy {}".format(self.https_proxy),wt=True)
self.requests_cnt=0
while True:
try:
proxyDict = {"http" : self.https_proxy, "https" : self.https_proxy}
response = requests.get(url,allow_redirects=True,proxies=proxyDict)
self.requests_cnt+=1
break
except TimeoutError:
self.printtext("TimeoutError: Connection Timeout",wt=True)
time.sleep(10)
except requests.exceptions.ProxyError as err:
#OSError: Tunnel connection failed: 504 Couldn't connect: Connection refused
self.printtext(("Proxy Error:", err),wt=True)
self.printtext("Proxy {} used for {} times".format(self.https_proxy,self.requests_cnt),wt=True)
time.sleep(10)
self.https_proxy=next(proxy_pool)
self.printtext("Using next proxy:{}".format(self.https_proxy),wt=True)
self.requests_cnt=0
except requests.exceptions.SSLError as err:
self.printtext(("SSL Error:", err),wt=True)
self.https_proxy=next(proxy_pool)
self.printtext("Using next proxy:{}".format(self.https_proxy),wt=True)
self.requests_cnt=0
if response.status_code == 404:
self.printtext("Error 404 for: {}".format(url),wt=True)
return None
if response.status_code != 200:
self.printtext("HTTP Error {} ".format(response.status_code),wt=True)
print(url,file= open("yahoo_error_url.txt", "a",encoding="utf8"))
return None
#response.raise_for_status()
for resp in response.history:
print(resp.status_code, resp.url)
soup = BeautifulSoup(response.text, 'html.parser')
return soup
def insert_category_data(self,data):
data=data["itemListElement"]
flevel=data[0]["item"]
cat_url00=re.split('\/|\?|\=|\&',flevel)
cat_id00=cat_url00[6]
for i in data:
cat_url=re.split('\/|\?|\=|\&',i["item"])
cat_id=cat_url[6]
#print(cat_id)
level=i["position"]
if level ==1:
self.c.execute('''INSERT OR IGNORE INTO category (catid,level,catname) VALUES(?,?,?)''',(cat_id,level,i["name"]))
else:
self.c.execute('''INSERT OR IGNORE INTO category (catid,level,cat_parentid,catname) VALUES(?,?,?,?)''',(cat_id,level,cat_id00,i["name"]))
self.conn.commit()
return data[-1]["item"]
def insert_data(self,oldqid,newqid,cat_id,data,user_url):
try:
data=data["mainEntity"]
except KeyError:
return
title=data["name"]
content=data["text"]
ansc=data["answerCount"]
date=data["dateCreated"]
author_t=data["author"]["@type"]
author_n=data["author"]["name"]
if not oldqid:
self.printtext("Insert {}, question:{}".format(newqid,title),wt=True)
else:
self.printtext("Insert {}/{}, question:{}".format(newqid,oldqid,title),wt=True)
self.c.execute('''INSERT OR REPLACE INTO question (newqid,oldqid,category_id,title,content,answercount,datecreated,author_type,author_name,author_link) VALUES(?,?,?,?,?,?,?,?,?,?)''',(newqid,oldqid,cat_id,title,content,ansc,date,author_t,author_n,user_url[0]))
user_urlpos=1
if "acceptedAnswer" in data:
data2=data["acceptedAnswer"]
content2=data2["text"]
date2=data2["dateCreated"]
author_t2=data2["author"]["@type"]
author_n2=data2["author"]["name"]
upvote_c2=data2["upvoteCount"]
rows2=self.fetchdata_nomapping("SELECT 1 FROM answers WHERE answer=? AND author_name=? AND datecreated=? LIMIT 1",(content2,author_n2,date2))
if len(rows2)==1:
pass
else:
self.c.execute('''INSERT OR IGNORE INTO answers (question_id,is_accepted,answer,datecreated,author_type,author_name,author_link,upvotecount) VALUES(?,?,?,?,?,?,?,?)''',(newqid,1,content2,date2,author_t2,author_n2,user_url[user_urlpos],upvote_c2))
user_urlpos+=1;
if data["suggestedAnswer"]:
for i in data["suggestedAnswer"]:
content2=i["text"]
date2=i["dateCreated"]
author_t2=i["author"]["@type"]
author_n2=i["author"]["name"]
upvote_c2=i["upvoteCount"]
rows2=self.fetchdata_nomapping("SELECT 1 FROM answers WHERE answer=? AND author_name=? AND datecreated=? LIMIT 1",(content2,author_n2,date2))
if len(rows2)==1:
user_urlpos+=1;
continue
self.c.execute('''INSERT OR IGNORE INTO answers (question_id,is_accepted,answer,datecreated,author_type,author_name,author_link,upvotecount) VALUES (?,?,?,?,?,?,?,?)''',(newqid,0,content2,date2,author_t2,author_n2,user_url[user_urlpos],upvote_c2))
user_urlpos+=1;
self.conn.commit()
self.printtext("Answer Count: {}".format(ansc),wt=True)
#self.printtext("Insert {} completed".format(newqid),wt=True)
return
def parsing_area(self,link):
#https://hk.answers.yahoo.com/question/index?qid=20210405072002AAPcNek
self.printtext(link,wt=True)
spilited_url=re.split('\/|\?|\=|\&',link)
questionid=spilited_url[6]
rows2=self.fetchdata_nomapping("SELECT 1 FROM question WHERE newqid=? LIMIT 1",(questionid,))
rows3=self.fetchdata_nomapping("SELECT 1 FROM question WHERE oldqid=? LIMIT 1",(questionid,))
if len(rows2)>0 or len(rows3)>0:
self.printtext("Already fetched, skip request",wt=True)
return
soup=self.new_request(link)
if soup is None:
return
#print response into a html
#print(soup.prettify(),file= open(questionid+".html", "w | print(str1)
| random_line_split |
|
anichart.py | data into a single list
The original root value (i.e.- tv, leftovers) is added as the "category" key and title()'d.
"""
out = list()
## API data is organized in {category (tv,tvshort,movie,etc.):[list of show dicts]}
for cat,shows in data.items():
for show in shows:
show['category'] = cat.title()
out.append(show)
return out
def test_rawdata(data):
""" Checks if the data has been consolidated, returning True if it has not, otherwise False """
base = list(data)[0]
if base in ["tv","leftovers","tv short","movie","OVA / ONA / Special"]:
return True
return False
def check_rawdata(data):
""" Checks if the data has been consolidated using test_rawdata; if it hasn't, this function will consolidate the data """
if test_rawdata(data):
return consolidate_data(data)
return data
def fixstartdate(startdate):
""" Converts startdate default "yyyymmdd" to "dd/mm/yyyy"
If startdate is falsey, returns a default value of "01/01/2017"
"""
if not startdate: return "01/01/2017"
s = str(startdate)
d,m,y = [max(dt,1) for dt in [int(s[6:8]),int(s[4:6]),int(s[:4])]]
return f"{d:0>2}/{m:0>2}/{y:0>4}"
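## For illustration: fixstartdate(20170415) returns "15/04/2017", while a zeroed
## day/month such as 20170000 is clamped to "01/01/2017".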
def getseason(data):
""" Tries to determine the season that an anime aired based on its season value and it's ratings """
## Season key is the most reliable
season = data.get("season")
if season:
## Season key is an integer formatted "YYS" and is 2000-based (i.e.- 171 == 2017-Winter)
season = str(season)
year = int(f"20{season[:2]}")
## Anichart Season key is 1-indexed
season = int(season[2]) - 1
## This should normally pass; if it consistently does not, we'll have to investigate why
try: return SeasonCharts.buildseason(season,year)
## If something goes wrong, we'll try another method
except: print(f"Failed to parse season: {data['season']}")
## Next, we'll iterate over rankings to try to determine the season/year
## There are multiple types of rankings based on season, year, and both combined,
## so we'll piece it together based on whatever we come across first
season,year = None,None
for ranking in data.get("rankings",list()):
## Quicker exit (without just making this loop its own function)
if season and year: continue
## We'll ignore stuff we've already gotten and assume that nothing in
## rankings contradicts each other
if not season:
## Defaults to None one way or another if it's not supplied
season = ranking.get("season")
if not year: year = ranking.get("year")
## Check if we made it
if season and year:
## As above, this should always work out-of-the-box
try: return SeasonCharts.buildseason(season,year)
except: print(season,year)
## Welp, we're stumped...
return None
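## For illustration (per the comments above): a "season" value of 171 is read as
## year 2017 with Anichart's 1-indexed season 1, i.e. SeasonCharts index 0 (Winter).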
def overwrite_season(function):
""" A wrapper to allow a function to accept a season and overwrite all data entries with the given season using replace_season
If season is provided, it must be a string that conforms to the standard Season format found in SeasonCharts.
The season parameter accepted by this decorator will NOT be passed on to the function.
season is updated on the output, but in many cases will mutate the input as well due to the way this module
handles data.
"""
@functools.wraps(function)
def inner(*args, season = None, **kw):
data = function(*args,**kw)
if season:
replace_season(data,season)
return data
return inner
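## Usage sketch: a function wrapped with @overwrite_season gains a keyword-only
## `season` argument; when supplied, replace_season() stamps that value onto every
## show in the result (the string must satisfy SeasonCharts.matchseason).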
def replace_season(data,season):
|
@overwrite_season
def cleanapidata_csv(data):
""" Cleans the API data for use in a CSV and returns a list of headers along with the clean data.
Converts startdate to EST startdate.
Adds firstepisode date.
Adds EST_airing time.
Features overwrite_season decorator
Returns ([list of Headers (strs)],[list of Shows (dicts)])
"""
out = list()
siteheaders = []
data = check_rawdata(data)
for show in data:
if show.get("airing"):
## Convert airing to datetime for utility
show['airing']['time'] = datetime.datetime.fromtimestamp(show['airing']['time']).replace(tzinfo = web.JST)
## Format start_date to Day/Month/Year
if show.get("start_date"):
## start_date is an integer representing yyyymmdd
startdate = fixstartdate(show['startdate'])
startdt = None
## Test that start_date is a valid date
try:
startdt = datetime.datetime.strptime(startdate, "%d/%m/%Y")
except:
## If not, use "airing"
if show.get("airing"):
## If we're multiple weeks in, compensate (next_episode = 1 means we're on the first week)
startdt = show['airing']['time'] - datetime.timedelta(weeks = show['airing']['next_episode']-1)
## If "airing" doesn't work, the month and year are normally correct
## so we'll just replace the day
else:
s = str(show['startdate'])
startdate = f"{s[4:6]}/01/{s[:4]}"
## If we ended up using a dt object, convert it back out
if startdt:
startdate = startdt.strftime("%d/%m/%Y")
## Set clean data
show['start_date']= startdate
## Remove breaks from "description" (Excel isn't escaping newline characters)
if show.get('description'):
## <br> characters sometimes sneak in as well
show['description'] = show['description'].replace("\n"," ").replace("<br>","")
## Convert studio from object dict to simply name
if show.get("studio"):
show['studio'] = show['studio']['name']
## Convert "tags" from object dicts to a string of names, and remove all spoiler tags
show['oldtags'] = []
if show.get('tags'):
show['tags'] = ", ".join(sorted([tag['name'] for tag in show['tags'] if not tag['spoiler']]))
## If "tags" list is empty, replace it with empty string
else:
show['tags'] = ""
## Convert "youtube_id" to url
if show.get('youtube_id'):
show['youtube_id'] = YOUTUBELINK.format(youtube_id = show["youtube_id"])
######### Generated Data
## Set the first episode's date (used for sorting purposes)
show['first_episode'] = None
## Requires "airing" and "start_date"
if show.get('airing') and show.get('start_date'):
## Create full datetime
airtime = f"{show['airing']['time'].strftime('%H:%M')} {show['start_date']}"
dt = datetime.datetime.strptime(airtime,"%H:%M %d/%m/%Y")
## Convert to EST
dt = dt.replace(tzinfo = web.JST).astimezone(web.EST)
show['first_episode'] = dt.strftime("%d/%m/%Y")
## Airing time in EST
show['EST_airing'] = ""
## Use "airing" to get the airtime in EST as HH:MM apm
if show.get('airing'):
dt = show['airing']['time'].astimezone(web.EST)
## Set clean data
show['EST_airing'] = dt.strftime("%H:%M")
## Convert sites in "external_links" to their own header
## There may be multiple sites in each site category
## (Official Site, Twitter, etc.), so we'll keep track
## of duplicates locally using enumeration and at the
## method scope using "siteheaders"
## Get a master list of external site names
sitenames = list(set([site['site'] for site in show['external_links']]))
## For each unique site name
for name in sitenames:
## Collect all occurrences
count = [site for site in show['external_links'] if site['site'] == name]
## Enumerate so we can create additional, unique headers as necessary
## Example Headers: Twitter[, Twitter 2 | """ Replaces the season value of a list of Show objects or data dicts
Mutates the objects in-place.
"""
if not SeasonCharts.matchseason(season):
raise SeasonCharts.SeasonError
## Check data format
if test_rawdata(data):
for cat,shows in data.items():
for show in shows: show['season'] = season
else:
for show in data: show['season'] = season | identifier_body |
anichart.py | ## rankings contradicts each other
if not season:
## Defaults to None one way or another if it's not supplied
season = ranking.get("season")
if not year: year = ranking.get("year")
## Check if we made it
if season and year:
## As above, this should always work out-of-the-box
try: return SeasonCharts.buildseason(season,year)
except: print(season,year)
## Welp, we're stumped...
return None
def overwrite_season(function):
""" A wrapper to allow a function to accept a season and overwrite all data entries with the given season using replace_season
If season is provided, it must be a string that conforms to the standard Season format found in SeasonCharts.
The season parameter accepted by this decorator will NOT be passed on to the function.
season is updated on the output, but in many cases will mutate the input as well due to the way this module
handles data.
"""
@functools.wraps(function)
def inner(*args, season = None, **kw):
data = function(*args,**kw)
if season:
replace_season(data,season)
return data
return inner
def replace_season(data,season):
""" Replaces the season value of a list of Show objects or data dicts
Mutates the objects in-place.
"""
if not SeasonCharts.matchseason(season):
raise SeasonCharts.SeasonError
## Check data format
if test_rawdata(data):
for cat,shows in data.items():
for show in shows: show['season'] = season
else:
for show in data: show['season'] = season
@overwrite_season
def cleanapidata_csv(data):
""" Cleans the API data for use in a CSV and returns a list of headers along with the clean data.
Converts startdate to EST startdate.
Adds firstepisode date.
Adds EST_airing time.
Features overwrite_season decorator
Returns ([list of Headers (strs)],[list of Shows (dicts)])
"""
out = list()
siteheaders = []
data = check_rawdata(data)
for show in data:
if show.get("airing"):
## Convert airing to datetime for utility
show['airing']['time'] = datetime.datetime.fromtimestamp(show['airing']['time']).replace(tzinfo = web.JST)
## Format start_date to Day/Month/Year
if show.get("start_date"):
## start_date is an integer representing yyyymmdd
startdate = fixstartdate(show['startdate'])
startdt = None
## Test that start_date is a valid date
try:
startdt = datetime.datetime.strptime(startdate, "%d/%m/%Y")
except:
## If not, use "airing"
if show.get("airing"):
## If we're multiple weeks in, compensate (next_episode = 1 means we're on the first week)
startdt = show['airing']['time'] - datetime.timedelta(weeks = show['airing']['next_episode']-1)
## If "airing" doesn't work, the month and year are normally correct
## so we'll just replace the day
else:
s = str(show['startdate'])
startdate = f"{s[4:6]}/01/{s[:4]}"
## If we ended up using a dt object, convert it back out
if startdt:
startdate = startdt.strftime("%d/%m/%Y")
## Set clean data
show['start_date']= startdate
## Remove breaks from "description" (Excel isn't escaping newline characters)
if show.get('description'):
## <br> characters sometimes sneak in as well
show['description'] = show['description'].replace("\n"," ").replace("<br>","")
## Convert studio from object dict to simply the name
if show.get("studio"):
show['studio'] = show['studio']['name']
## Convert "tags" from object dicts to a string of names, and remove all spoiler tags
show['oldtags'] = []
if show.get('tags'):
show['tags'] = ", ".join(sorted([tag['name'] for tag in show['tags'] if not tag['spoiler']]))
## If "tags" list is empty, replace it with empty string
else:
show['tags'] = ""
## Convert "youtube_id" to url
if show.get('youtube_id'):
show['youtube_id'] = YOUTUBELINK.format(youtube_id = show["youtube_id"])
######### Generated Data
## Set the first episode's date (used for sorting purposes)
show['first_episode'] = None
## Requires "airing" and "start_date"
if show.get('airing') and show.get('start_date'):
## Create full datetime
airtime = f"{show['airing']['time'].strftime('%H:%M')} {show['start_date']}"
dt = datetime.datetime.strptime(airtime,"%H:%M %d/%m/%Y")
## Convert to EST
dt = dt.replace(tzinfo = web.JST).astimezone(web.EST)
show['first_episode'] = dt.strftime("%d/%m/%Y")
## Airing time in EST
show['EST_airing'] = ""
## Use "airing" to get the airtime in EST as HH:MM apm
if show.get('airing'):
dt = show['airing']['time'].astimezone(web.EST)
## Set clean data
show['EST_airing'] = dt.strftime("%H:%M")
## Convert sites in "external_sites" to their own header
## There may be multiple sites in each site category
## (Official Site, Twitter, etc.), so we'll keep track
## of duplicates locally using enumeration and at the
## method scope using "siteheaders"
## Get a master list of external site names
sitenames = list(set([site['site'] for site in show['external_links']]))
## For each unique site name
for name in sitenames:
## Collect all occurrences
count = [site for site in show['external_links'] if site['site'] == name]
## Enumerate so we can create additional, unique headers as necessary
## Example Headers: Twitter[, Twitter 2, Twitter 3, ...])
for i,site in enumerate(count,start=1):
## The first occurrence simply goes by the category of site
if i == 1: duplicatename = name
## Otherwise append the occurrence count
else: duplicatename = f"{name} {i}"
## Keep track at the method level so that we can
## output data correctly
if duplicatename not in siteheaders:
siteheaders.append(duplicatename)
## Add to show dict
show[duplicatename] = site['url']
## Remove "external_links" because it is now redundant
del show['external_links']
out.append(show)
headers = list(APIDATAHEADERS)
## Added during cleaning and updating
headers.insert(0,'category')
headers.insert(4,"first_episode")
headers.insert(5,"EST_airing")
headers.extend(sorted(siteheaders))
## Removed during cleaning and updating
headers.remove("external_links")
return headers,out
def outputapidata_csv(filename, data, headers=None):
""" Creates a CSV file with filename using data and headers (if supplied) """
with open(filename,'w',encoding='utf-8',newline = "", ) as f:
if headers:
writer = csv.DictWriter(f,fieldnames = headers)
writer.writeheader()
else:
writer = csv.DictWriter(f, fieldnames = list(data[0].keys()) if data else []) # derive headers from the first row when none are supplied
writer.writerows(data)
def serializeshows(file,shows):
""" Creates a json file containing the shows """
with open(file,'w', encoding = 'utf-8') as f:
json.dump([show.serialize() for show in shows],f)
def convertshowstostandard(data, season = None, showfactory = SeasonCharts.Show):
""" Converts a full collection of API data to a list of standard Show Objects (via converttostandard)
If season is provided, replace_season will be called before converting.
"""
data = check_rawdata(data)
out = list()
if season: replace_season(data, season)
for show in data: out.append(converttostandard(show, showfactory = showfactory))
return out
def converttostandard(show, showfactory = SeasonCharts.Show):
""" Converts an AniChart Show to a standard Show Object """
if not isinstance(show,Show):
raise TypeError("converttostandard requires an AniChart Show Object.")
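## Record which chart this data came from along with the chart-specific show id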
chartsource = [(CHARTNAME,show['id']),]
if show.get("season") is None or not SeasonCharts.matchseason(show.get("season")):
show['season'] = getseason(show)
season = show['season']
japanese_title = show['title_japanese']
romaji_title = show['title_romaji']
english_title = show['title_english']
additional_titles = show['synonyms']
medium = show['type'] | continuing = show['category'].lower() == "leftovers"
summary = f"(From {CHARTNAME})\n{show.get('description')}"
| random_line_split |
|
anichart.py | 2]) - 1
## This should normally pass; if it consistently does not, we'll have to investigate why
try: return SeasonCharts.buildseason(season,year)
## If something goes wrong, we'll try another method
except: print(f"Failed to parse season: {data['season']}")
## Next, we'll iterate over rankings to try to determine the season/year
## There are multiple types of rankings based on season, year, and both combined,
## so we'll piece it together based on whatever we come across first
season,year = None,None
for ranking in data.get("rankings",list()):
## Quicker exit (without just making this loop its own function)
if season and year: continue
## We'll ignore stuff we've already gotten and assume that nothing in
## rankings contradicts each other
if not season:
## Defaults to None one way or another if it's not supplied
season = ranking.get("season")
if not year: year = ranking.get("year")
## Check if we made it
if season and year:
## As above, this should always work out-of-the-box
try: return SeasonCharts.buildseason(season,year)
except: print(season,year)
## Welp, we're stumped...
return None
def overwrite_season(function):
""" A wrapper to allow a function to accept a season and overwrite all data entries with the given season using replace_season
If season is provided, it must be a string that conforms to the standard Season format found in SeasonCharts.
The season parameter accepted by this decorator will NOT be passed on to the function.
season is updated on the output, but in many cases will mutate the input as well due to the way this module
handles data.
"""
@functools.wraps(function)
def inner(*args, season = None, **kw):
data = function(*args,**kw)
if season:
replace_season(data,season)
return data
return inner
def replace_season(data,season):
""" Replaces the season value of a list of Show objects or data dicts
Mutates the objects in-place.
"""
if not SeasonCharts.matchseason(season):
raise SeasonCharts.SeasonError
## Check data format
if test_rawdata(data):
for cat,shows in data.items():
for show in shows: show['season'] = season
else:
for show in data: show['season'] = season
@overwrite_season
def cleanapidata_csv(data):
""" Cleans the API data for use in a CSV and returns a list of headers along with the clean data.
Converts startdate to EST startdate.
Adds firstepisode date.
Adds EST_airing time.
Features overwrite_season decorator
Returns ([list of Headers (strs)],[list of Shows (dicts)])
"""
out = list()
siteheaders = []
data = check_rawdata(data)
for show in data:
if show.get("airing"):
## Convert airing to datetime for utility
show['airing']['time'] = datetime.datetime.fromtimestamp(show['airing']['time']).replace(tzinfo = web.JST)
## Format start_date to Day/Month/Year
if show.get("start_date"):
## start_date is an integer representing yyyymmdd
startdate = fixstartdate(show['startdate'])
startdt = None
## Test that start_date is a valid date
try:
startdt = datetime.datetime.strptime(startdate, "%d/%m/%Y")
except:
## If not, use "airing"
if show.get("airing"):
## If we're multiple weeks in, compensate (next_episode = 1 means we're on the first week)
startdt = show['airing']['time'] - datetime.timedelta(weeks = show['airing']['next_episode']-1)
## If "airing" doesn't work, the month and year are normally correct
## so we'll just replace the day
else:
s = str(show['startdate'])
startdate = f"{s[4:6]}/01/{s[:4]}"
## If we ended up using a dt object, convert it back out
if startdt:
startdate = startdt.strftime("%d/%m/%Y")
## Set clean data
show['start_date']= startdate
## Remove breaks from "description" (Excel isn't escaping newline characters)
if show.get('description'):
## <br> characters sometimes sneak in as well
show['description'] = show['description'].replace("\n"," ").replace("<br>","")
## Convert studio from object dict to simply the name
if show.get("studio"):
show['studio'] = show['studio']['name']
## Convert "tags" from object dicts to a string of names, and remove all spoiler tags
show['oldtags'] = []
if show.get('tags'):
show['tags'] = ", ".join(sorted([tag['name'] for tag in show['tags'] if not tag['spoiler']]))
## If "tags" list is empty, replace it with empty string
else:
show['tags'] = ""
## Convert "youtube_id" to url
if show.get('youtube_id'):
show['youtube_id'] = YOUTUBELINK.format(youtube_id = show["youtube_id"])
######### Generated Data
## Set the first episode's date (used for sorting purposes)
show['first_episode'] = None
## Requires "airing" and "start_date"
if show.get('airing') and show.get('start_date'):
## Create full datetime
airtime = f"{show['airing']['time'].strftime('%H:%M')} {show['start_date']}"
dt = datetime.datetime.strptime(airtime,"%H:%M %d/%m/%Y")
## Convert to EST
dt = dt.replace(tzinfo = web.JST).astimezone(web.EST)
show['first_episode'] = dt.strftime("%d/%m/%Y")
## Airing time in EST
show['EST_airing'] = ""
## Use "airing" to get the airtime in EST as HH:MM apm
if show.get('airing'):
dt = show['airing']['time'].astimezone(web.EST)
## Set clean data
show['EST_airing'] = dt.strftime("%H:%M")
## Convert sites in "external_sites" to their own header
## There may be multiple sites in each site category
## (Official Site, Twitter, etc.), so we'll keep track
## of duplicates locally using enumeration and at the
## method scope using "siteheaders"
## Get a master list of external site names
sitenames = list(set([site['site'] for site in show['external_links']]))
## For each unique site name
for name in sitenames:
## Collect all occurrences
count = [site for site in show['external_links'] if site['site'] == name]
## Enumerate so we can create additional, unique headers as necessary
## Example Headers: Twitter[, Twitter 2, Twitter 3, ...])
for i,site in enumerate(count,start=1):
## The first occurrence simply goes by the category of site
if i == 1: duplicatename = name
## Otherwise append the occurrence count
else: duplicatename = f"{name} {i}"
## Keep track at the method level so that we can
## output data correctly
if duplicatename not in siteheaders:
siteheaders.append(duplicatename)
## Add to show dict
show[duplicatename] = site['url']
## Remove "external_links" because it is now redundant
del show['external_links']
out.append(show)
headers = list(APIDATAHEADERS)
## Added during cleaning and updating
headers.insert(0,'category')
headers.insert(4,"first_episode")
headers.insert(5,"EST_airing")
headers.extend(sorted(siteheaders))
## Removed during cleaning and updating
headers.remove("external_links")
return headers,out
def outputapidata_csv(filename, data, headers=None):
""" Creates a CSV file with filename using data and headers (if supplied) """
with open(filename,'w',encoding='utf-8',newline = "", ) as f:
if headers:
writer = csv.DictWriter(f,fieldnames = headers)
writer.writeheader()
else:
writer = csv.DictWriter(f, fieldnames = list(data[0].keys()) if data else []) # derive headers from the first row when none are supplied
writer.writerows(data)
def serializeshows(file,shows):
""" Creates a json file containing the shows """
with open(file,'w', encoding = 'utf-8') as f:
json.dump([show.serialize() for show in shows],f)
def convertshowstostandard(data, season = None, showfactory = SeasonCharts.Show):
""" Converts a full collection of API data to a list of standard Show Objects (via converttostandard)
If season is provided, replace_season will be called before converting.
"""
data = check_rawdata(data)
out = list()
if season: replace_season(data, season)
for show in data: out.append(converttostandard(show, showfactory = showfactory))
return out
def | converttostandard | identifier_name |
|
anichart.py | into a single list
The original root value (i.e.- tv, leftovers) is added as the "category" key and title()'d.
"""
out = list()
## API data is organized in {category (tv,tvshort,movie,etc.):[list of show dicts]}
for cat,shows in data.items():
for show in shows:
show['category'] = cat.title()
out.append(show)
return out
def test_rawdata(data):
""" Checks if the data has been consolidated, returning True if it has not, otherwise False """
base = list(data)[0]
if base in ["tv","leftovers","tv short","movie","OVA / ONA / Special"]:
return True
return False
def check_rawdata(data):
""" Checks if the data has been consolidated using test_rawdata; if it hasn't, this function will consolidate the data """
if test_rawdata(data):
return consolidate_data(data)
return data
def fixstartdate(startdate):
""" Converts startdate default "yyyymmdd" to "dd/mm/yyyy"
If startdate is falsey, returns a default value of "01/01/2017"
"""
if not startdate: |
s = str(startdate)
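## Split the yyyymmdd string into day/month/year and clamp any zero components (presumably unknown) up to 1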
d,m,y = [max(dt,1) for dt in [int(s[6:8]),int(s[4:6]),int(s[:4])]]
return f"{d:0>2}/{m:0>2}/{y:0>4}"
def getseason(data):
""" Tries to determine the season that an anime aired based on its season value and it's ratings """
## Season key is the most reliable
season = data.get("season")
if season:
## Season key is an integer formatted "YYS" and is 2000-based (i.e.- 171 == 2017-Winter)
season = str(season)
year = int(f"20{season[:2]}")
## Anichart Season key is 1-indexed
season = int(season[2]) - 1
## This should normally pass; if it consistently does not, we'll have to investigate why
try: return SeasonCharts.buildseason(season,year)
## If something goes wrong, we'll try another method
except: print(f"Failed to parse season: {data['season']}")
## Next, we'll iterate over rankings to try to determine the season/year
## There are multiple types of rankings based on season, year, and both combined,
## so we'll piece it together based on whatever we come across first
season,year = None,None
for ranking in data.get("rankings",list()):
## Quicker exit (without just making this loop its own function)
if season and year: continue
## We'll ignore stuff we've already gotten and assume that nothing in
## rankings contradicts each other
if not season:
## Defaults to None one way or another if it's not supplied
season = ranking.get("season")
if not year: year = ranking.get("year")
## Check if we made it
if season and year:
## As above, this should always work out-of-the-box
try: return SeasonCharts.buildseason(season,year)
except: print(season,year)
## Welp, we're stumped...
return None
def overwrite_season(function):
""" A wrapper to allow a function to accept a season and overwrite all data entries with the given season using replace_season
If season is provided, it must be a string that conforms to the standard Season format found in SeasonCharts.
The season parameter accepted by this decorator will NOT be passed on to the function.
season is updated on the output, but in many cases will mutate the input as well due to the way this module
handles data.
"""
@functools.wraps(function)
def inner(*args, season = None, **kw):
data = function(*args,**kw)
if season:
replace_season(data,season)
return data
return inner
def replace_season(data,season):
""" Replaces the season value of a list of Show objects or data dicts
Mutates the objects in-place.
"""
if not SeasonCharts.matchseason(season):
raise SeasonCharts.SeasonError
## Check data format
if test_rawdata(data):
for cat,shows in data.items():
for show in shows: show['season'] = season
else:
for show in data: show['season'] = season
@overwrite_season
def cleanapidata_csv(data):
""" Cleans the API data for use in a CSV and returns a list of headers along with the clean data.
Converts startdate to EST startdate.
Adds firstepisode date.
Adds EST_airing time.
Features overwrite_season decorator
Returns ([list of Headers (strs)],[list of Shows (dicts)])
"""
out = list()
siteheaders = []
data = check_rawdata(data)
for show in data:
if show.get("airing"):
## Convert airing to datetime for utility
show['airing']['time'] = datetime.datetime.fromtimestamp(show['airing']['time']).replace(tzinfo = web.JST)
## Format start_date to Day/Month/Year
if show.get("start_date"):
## start_date is an integer representing yyyymmdd
startdate = fixstartdate(show['startdate'])
startdt = None
## Test that start_date is a valid date
try:
startdt = datetime.datetime.strptime(startdate, "%d/%m/%Y")
except:
## If not, use "airing"
if show.get("airing"):
## If we're multiple weeks in, compensate (next_episode = 1 means we're on the first week)
startdt = show['airing']['time'] - datetime.timedelta(weeks = show['airing']['next_episode']-1)
## If "airing" doesn't work, the month and year are normally correct
## so we'll just replace the day
else:
s = str(show['startdate'])
startdate = f"{s[4:6]}/01/{s[:4]}"
## If we ended up using a dt object, convert it back out
if startdt:
startdate = startdt.strftime("%d/%m/%Y")
## Set clean data
show['start_date']= startdate
## Remove breaks from "description" (Excel isn't escaping newline characters)
if show.get('description'):
## <br> characters sometimes sneak in as well
show['description'] = show['description'].replace("\n"," ").replace("<br>","")
## Convert studio from object dict to simply the name
if show.get("studio"):
show['studio'] = show['studio']['name']
## Convert "tags" from object dicts to a string of names, and remove all spoiler tags
show['oldtags'] = []
if show.get('tags'):
show['tags'] = ", ".join(sorted([tag['name'] for tag in show['tags'] if not tag['spoiler']]))
## If "tags" list is empty, replace it with empty string
else:
show['tags'] = ""
## Convert "youtube_id" to url
if show.get('youtube_id'):
show['youtube_id'] = YOUTUBELINK.format(youtube_id = show["youtube_id"])
######### Generated Data
## Set the first episode's date (used for sorting purposes)
show['first_episode'] = None
## Requires "airing" and "start_date"
if show.get('airing') and show.get('start_date'):
## Create full datetime
airtime = f"{show['airing']['time'].strftime('%H:%M')} {show['start_date']}"
dt = datetime.datetime.strptime(airtime,"%H:%M %d/%m/%Y")
## Convert to EST
dt = dt.replace(tzinfo = web.JST).astimezone(web.EST)
show['first_episode'] = dt.strftime("%d/%m/%Y")
## Airing time in EST
show['EST_airing'] = ""
## Use "airing" to get the airtime in EST as HH:MM apm
if show.get('airing'):
dt = show['airing']['time'].astimezone(web.EST)
## Set clean data
show['EST_airing'] = dt.strftime("%H:%M")
## Convert sites in "external_sites" to their own header
## There may be multiple sites in each site category
## (Official Site, Twitter, etc.), so we'll keep track
## of duplicates locally using enumeration and at the
## method scope using "siteheaders"
## Get a master list of external site names
sitenames = list(set([site['site'] for site in show['external_links']]))
## For each unique site name
for name in sitenames:
## Collect all occurrences
count = [site for site in show['external_links'] if site['site'] == name]
## Enumerate so we can create additional, unique headers as necessary
## Example Headers: Twitter[, Twitter | return "01/01/2017" | conditional_block |
runIPRscan.py | 11)
# Errors are indicated by HTTP status codes.
try:
# Set the User-agent.
user_agent = getUserAgent()
http_headers = {'User-Agent': user_agent}
req = urllib.request.Request(url, None, http_headers)
# Make the request (HTTP GET).
reqH = urllib.request.urlopen(req)
result = reqH.read()
reqH.close()
# Errors are indicated by HTTP status codes.
except urllib.error.HTTPError as ex:
# Trap exception and output the document to get error message.
print(ex.read(), file=sys.stderr)
raise
printDebugMessage('restRequest', 'End', 11)
return result
# Get input parameters list
def serviceGetParameters():
printDebugMessage('serviceGetParameters', 'Begin', 1)
requestUrl = baseUrl + '/parameters'
printDebugMessage('serviceGetParameters', 'requestUrl: ' + requestUrl, 2)
xmlDoc = restRequest(requestUrl)
doc = xmltramp.parse(xmlDoc)
printDebugMessage('serviceGetParameters', 'End', 1)
return doc['id':]
# Print list of parameters
def printGetParameters():
printDebugMessage('printGetParameters', 'Begin', 1)
idList = serviceGetParameters()
for id in idList:
print(id)
printDebugMessage('printGetParameters', 'End', 1)
# Get input parameter information
def serviceGetParameterDetails(paramName):
printDebugMessage('serviceGetParameterDetails', 'Begin', 1)
printDebugMessage('serviceGetParameterDetails',
'paramName: ' + paramName, 2)
requestUrl = baseUrl + '/parameterdetails/' + paramName
printDebugMessage('serviceGetParameterDetails',
'requestUrl: ' + requestUrl, 2)
xmlDoc = restRequest(requestUrl)
doc = xmltramp.parse(xmlDoc)
printDebugMessage('serviceGetParameterDetails', 'End', 1)
return doc
# Print description of a parameter
def printGetParameterDetails(paramName):
printDebugMessage('printGetParameterDetails', 'Begin', 1)
doc = serviceGetParameterDetails(paramName)
print(str(doc.name) + "\t" + str(doc.type))
print(doc.description)
for value in doc.values:
print(value.value, end=' ')
if str(value.defaultValue) == 'true':
print('default', end=' ')
print()
print("\t" + str(value.label))
if(hasattr(value, 'properties')):
for wsProperty in value.properties:
print("\t" + str(wsProperty.key) + "\t" + str(wsProperty.value))
#print doc
printDebugMessage('printGetParameterDetails', 'End', 1)
# Submit job
def serviceRun(email, title, params):
printDebugMessage('serviceRun', 'Begin', 1)
# Insert e-mail and title into params
params['email'] = email
if title:
params['title'] = title
requestUrl = baseUrl + '/run/'
printDebugMessage('serviceRun', 'requestUrl: ' + requestUrl, 2)
# Signature methods require special handling (list)
applData = ''
if 'appl' in params:
# So extract from params
applList = params['appl']
del params['appl']
# Build the method data options
for appl in applList:
applData += '&appl=' + appl
# Get the data for the other options
requestData = urllib.parse.urlencode(params)
# Concatenate the two parts.
requestData += applData
printDebugMessage('serviceRun', 'requestData: ' + requestData, 2)
# Errors are indicated by HTTP status codes.
try:
# Set the HTTP User-agent.
user_agent = getUserAgent()
http_headers = {'User-Agent': user_agent}
req = urllib.request.Request(requestUrl, None, http_headers)
# Make the submission (HTTP POST).
reqH = urllib.request.urlopen(req, requestData.encode('utf-8')) # POST data must be bytes
jobId = reqH.read().decode('utf-8')
reqH.close()
except urllib.error.HTTPError as ex:
# Trap exception and output the document to get error message.
print(ex.read(), file=sys.stderr)
raise
printDebugMessage('serviceRun', 'jobId: ' + jobId, 2)
printDebugMessage('serviceRun', 'End', 1)
return jobId
# Get job status
def serviceGetStatus(jobId):
printDebugMessage('serviceGetStatus', 'Begin', 1)
printDebugMessage('serviceGetStatus', 'jobId: ' + jobId, 2)
requestUrl = baseUrl + '/status/' + jobId
printDebugMessage('serviceGetStatus', 'requestUrl: ' + requestUrl, 2)
status = restRequest(requestUrl)
printDebugMessage('serviceGetStatus', 'status: ' + status, 2)
printDebugMessage('serviceGetStatus', 'End', 1)
return status
# Print the status of a job
def printGetStatus(jobId):
printDebugMessage('printGetStatus', 'Begin', 1)
status = serviceGetStatus(jobId)
print(status)
printDebugMessage('printGetStatus', 'End', 1)
# Get available result types for job
def serviceGetResultTypes(jobId):
printDebugMessage('serviceGetResultTypes', 'Begin', 1)
printDebugMessage('serviceGetResultTypes', 'jobId: ' + jobId, 2)
requestUrl = baseUrl + '/resulttypes/' + jobId
printDebugMessage('serviceGetResultTypes', 'requestUrl: ' + requestUrl, 2)
xmlDoc = restRequest(requestUrl)
doc = xmltramp.parse(xmlDoc)
printDebugMessage('serviceGetResultTypes', 'End', 1)
return doc['type':]
# Print list of available result types for a job.
def printGetResultTypes(jobId):
printDebugMessage('printGetResultTypes', 'Begin', 1)
resultTypeList = serviceGetResultTypes(jobId)
for resultType in resultTypeList:
print(resultType['identifier'])
if(hasattr(resultType, 'label')):
print("\t", resultType['label'])
if(hasattr(resultType, 'description')):
print("\t", resultType['description'])
if(hasattr(resultType, 'mediaType')):
print("\t", resultType['mediaType'])
if(hasattr(resultType, 'fileSuffix')):
print("\t", resultType['fileSuffix'])
printDebugMessage('printGetResultTypes', 'End', 1)
# Get result
def serviceGetResult(jobId, type_):
printDebugMessage('serviceGetResult', 'Begin', 1)
printDebugMessage('serviceGetResult', 'jobId: ' + jobId, 2)
printDebugMessage('serviceGetResult', 'type_: ' + type_, 2)
requestUrl = baseUrl + '/result/' + jobId + '/' + type_
result = restRequest(requestUrl)
printDebugMessage('serviceGetResult', 'End', 1)
return result
# Client-side poll
def clientPoll(jobId):
printDebugMessage('clientPoll', 'Begin', 1)
result = 'PENDING'
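# Keep polling until the status leaves PENDING/RUNNING (e.g. FINISHED or ERROR), sleeping checkInterval seconds between checks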
while result == 'RUNNING' or result == 'PENDING':
result = serviceGetStatus(jobId)
print(result, file=sys.stderr)
if result == 'RUNNING' or result == 'PENDING':
time.sleep(checkInterval)
printDebugMessage('clientPoll', 'End', 1)
# Get result for a jobid
def getResult(jobId):
printDebugMessage('getResult', 'Begin', 1)
printDebugMessage('getResult', 'jobId: ' + jobId, 1)
# Check status and wait if necessary
clientPoll(jobId)
# Get available result types
resultTypes = serviceGetResultTypes(jobId)
for resultType in resultTypes:
# Derive the filename for the result
if options.outfile:
filename = options.outfile + '.' + \
str(resultType['identifier']) + '.' + \
str(resultType['fileSuffix'])
else:
filename = jobId + '.' + \
str(resultType['identifier']) + '.' + \
str(resultType['fileSuffix'])
# Write a result file
if not options.outformat or options.outformat == str(resultType['identifier']):
# Get the result
result = serviceGetResult(jobId, str(resultType['identifier']))
fh = open(filename, 'w')
fh.write(result)
fh.close()
print(filename)
printDebugMessage('getResult', 'End', 1)
# Read a file
def readFile(filename):
printDebugMessage('readFile', 'Begin', 1)
fh = open(filename, 'r')
data = fh.read()
fh.close()
printDebugMessage('readFile', 'End', 1)
return data
# No options... print help.
if numOpts < 2:
parser.print_help()
# List parameters
elif options.params:
printGetParameters()
# Get parameter details
elif options.paramDetail:
printGetParameterDetails(options.paramDetail)
# Submit job
elif options.email and not options.jobid:
params = {}
if 1 > 0:
if os.access(options.input, os.R_OK): # Read file into content
| params['sequence'] = readFile(options.input) | conditional_block |
|
runIPRscan.py | to use, see --paramDetail appl')
parser.add_argument('--crc', action="store_true",
help='enable InterProScan Matches look-up (ignored)')
parser.add_argument('--nocrc', action="store_true",
help='disable InterProScan Matches look-up (ignored)')
parser.add_argument('--goterms', action="store_true",
help='enable inclusion of GO terms')
parser.add_argument('--nogoterms', action="store_true",
help='disable inclusion of GO terms')
parser.add_argument('--pathways', action="store_true",
help='enable inclusion of pathway terms')
parser.add_argument('--nopathways', action="store_true",
help='disable inclusion of pathway terms')
parser.add_argument('--sequence', help='input sequence file name')
# General options
parser.add_argument('--email', required=True, help='e-mail address')
parser.add_argument('--title', help='job title')
parser.add_argument('--outfile', help='file name for results')
parser.add_argument('--outformat', help='output format for results')
parser.add_argument('--async', action='store_true', help='asynchronous mode')
parser.add_argument('--jobid', help='job identifier')
parser.add_argument('--polljob', action="store_true", help='get job result')
parser.add_argument('--status', action="store_true", help='get job status')
parser.add_argument('--resultTypes', action='store_true',
help='get result types')
parser.add_argument('--params', action='store_true',
help='list input parameters')
parser.add_argument('--paramDetail', help='get details for parameter')
parser.add_argument('--quiet', action='store_true',
help='decrease output level')
parser.add_argument('--verbose', action='store_true',
help='increase output level')
parser.add_argument('--baseURL', default=baseUrl, help='Base URL for service')
parser.add_argument('--debugLevel', type=int,
default=debugLevel, help='debug output level')
options = parser.parse_args()
# Increase output level
if options.verbose:
outputLevel += 1
# Decrease output level
if options.quiet:
outputLevel -= 1
# Debug level
if options.debugLevel:
debugLevel = options.debugLevel
# Debug print
def printDebugMessage(functionName, message, level):
if(level <= debugLevel):
print('[' + functionName + '] ' + message, file=sys.stderr)
# User-agent for request (see RFC2616).
def getUserAgent():
printDebugMessage('getUserAgent', 'Begin', 11)
# Agent string for urllib2 library.
urllib_agent = 'Python-urllib/%s' % urllib.request.__version__
clientRevision = '$Revision: 2809 $'
clientVersion = '0'
if len(clientRevision) > 11:
clientVersion = clientRevision[11:-2]
# Prepend client specific agent string.
user_agent = 'EBI-Sample-Client/%s (%s; Python %s; %s) %s' % (
clientVersion, os.path.basename(__file__),
platform.python_version(), platform.system(),
urllib_agent
)
printDebugMessage('getUserAgent', 'user_agent: ' + user_agent, 12)
printDebugMessage('getUserAgent', 'End', 11)
return user_agent
# Wrapper for a REST (HTTP GET) request
def restRequest(url):
printDebugMessage('restRequest', 'Begin', 11)
printDebugMessage('restRequest', 'url: ' + url, 11)
# Errors are indicated by HTTP status codes.
try:
# Set the User-agent.
user_agent = getUserAgent()
http_headers = {'User-Agent': user_agent}
req = urllib.request.Request(url, None, http_headers)
# Make the request (HTTP GET).
reqH = urllib.request.urlopen(req)
result = reqH.read()
reqH.close()
# Errors are indicated by HTTP status codes.
except urllib.error.HTTPError as ex:
# Trap exception and output the document to get error message.
print(ex.read(), file=sys.stderr)
raise
printDebugMessage('restRequest', 'End', 11)
return result
# Get input parameters list
def serviceGetParameters():
printDebugMessage('serviceGetParameters', 'Begin', 1)
requestUrl = baseUrl + '/parameters'
printDebugMessage('serviceGetParameters', 'requestUrl: ' + requestUrl, 2)
xmlDoc = restRequest(requestUrl)
doc = xmltramp.parse(xmlDoc)
printDebugMessage('serviceGetParameters', 'End', 1)
return doc['id':]
# Print list of parameters
def printGetParameters():
printDebugMessage('printGetParameters', 'Begin', 1)
idList = serviceGetParameters()
for id in idList:
print(id)
printDebugMessage('printGetParameters', 'End', 1)
# Get input parameter information
def serviceGetParameterDetails(paramName):
printDebugMessage('serviceGetParameterDetails', 'Begin', 1)
printDebugMessage('serviceGetParameterDetails',
'paramName: ' + paramName, 2)
requestUrl = baseUrl + '/parameterdetails/' + paramName
printDebugMessage('serviceGetParameterDetails',
'requestUrl: ' + requestUrl, 2)
xmlDoc = restRequest(requestUrl)
doc = xmltramp.parse(xmlDoc)
printDebugMessage('serviceGetParameterDetails', 'End', 1)
return doc
# Print description of a parameter
def | (paramName):
printDebugMessage('printGetParameterDetails', 'Begin', 1)
doc = serviceGetParameterDetails(paramName)
print(str(doc.name) + "\t" + str(doc.type))
print(doc.description)
for value in doc.values:
print(value.value, end=' ')
if str(value.defaultValue) == 'true':
print('default', end=' ')
print()
print("\t" + str(value.label))
if(hasattr(value, 'properties')):
for wsProperty in value.properties:
print("\t" + str(wsProperty.key) + "\t" + str(wsProperty.value))
#print doc
printDebugMessage('printGetParameterDetails', 'End', 1)
# Submit job
def serviceRun(email, title, params):
printDebugMessage('serviceRun', 'Begin', 1)
# Insert e-mail and title into params
params['email'] = email
if title:
params['title'] = title
requestUrl = baseUrl + '/run/'
printDebugMessage('serviceRun', 'requestUrl: ' + requestUrl, 2)
# Signature methods require special handling (list)
applData = ''
if 'appl' in params:
# So extract from params
applList = params['appl']
del params['appl']
# Build the method data options
for appl in applList:
applData += '&appl=' + appl
# Get the data for the other options
requestData = urllib.parse.urlencode(params)
# Concatenate the two parts.
requestData += applData
printDebugMessage('serviceRun', 'requestData: ' + requestData, 2)
# Errors are indicated by HTTP status codes.
try:
# Set the HTTP User-agent.
user_agent = getUserAgent()
http_headers = {'User-Agent': user_agent}
req = urllib.request.Request(requestUrl, None, http_headers)
# Make the submission (HTTP POST).
reqH = urllib.request.urlopen(req, requestData.encode('utf-8')) # POST data must be bytes
jobId = reqH.read().decode('utf-8')
reqH.close()
except urllib.error.HTTPError as ex:
# Trap exception and output the document to get error message.
print(ex.read(), file=sys.stderr)
raise
printDebugMessage('serviceRun', 'jobId: ' + jobId, 2)
printDebugMessage('serviceRun', 'End', 1)
return jobId
# Get job status
def serviceGetStatus(jobId):
printDebugMessage('serviceGetStatus', 'Begin', 1)
printDebugMessage('serviceGetStatus', 'jobId: ' + jobId, 2)
requestUrl = baseUrl + '/status/' + jobId
printDebugMessage('serviceGetStatus', 'requestUrl: ' + requestUrl, 2)
status = restRequest(requestUrl)
printDebugMessage('serviceGetStatus', 'status: ' + status, 2)
printDebugMessage('serviceGetStatus', 'End', 1)
return status
# Print the status of a job
def printGetStatus(jobId):
printDebugMessage('printGetStatus', 'Begin', 1)
status = serviceGetStatus(jobId)
print(status)
printDebugMessage('printGetStatus', 'End', 1)
# Get available result types for job
def serviceGetResultTypes(jobId):
printDebugMessage('serviceGetResultTypes', 'Begin', 1)
printDebugMessage('serviceGetResultTypes', 'jobId: ' + jobId, 2)
requestUrl = baseUrl + '/resulttypes/' + jobId
printDebugMessage('serviceGetResultTypes', 'requestUrl: ' + requestUrl, 2)
xmlDoc = restRequest(requestUrl)
doc = xmltramp.parse(xmlDoc)
printDebugMessage('serviceGetResultTypes', 'End', 1)
return doc['type':]
# Print list of available result types for a job.
def printGetResultTypes(jobId):
print | printGetParameterDetails | identifier_name |
runIPRscan.py | % urllib2.__version__
clientRevision = '$Revision: 2809 $'
clientVersion = '0'
if len(clientRevision) > 11:
clientVersion = clientRevision[11:-2]
# Prepend client specific agent string.
user_agent = 'EBI-Sample-Client/%s (%s; Python %s; %s) %s' % (
clientVersion, os.path.basename(__file__),
platform.python_version(), platform.system(),
urllib_agent
)
printDebugMessage('getUserAgent', 'user_agent: ' + user_agent, 12)
printDebugMessage('getUserAgent', 'End', 11)
return user_agent
# Wrapper for a REST (HTTP GET) request
def restRequest(url):
printDebugMessage('restRequest', 'Begin', 11)
printDebugMessage('restRequest', 'url: ' + url, 11)
# Errors are indicated by HTTP status codes.
try:
# Set the User-agent.
user_agent = getUserAgent()
http_headers = {'User-Agent': user_agent}
req = urllib.request.Request(url, None, http_headers)
# Make the request (HTTP GET).
reqH = urllib.request.urlopen(req)
result = reqH.read()
reqH.close()
# Errors are indicated by HTTP status codes.
except urllib.error.HTTPError as ex:
# Trap exception and output the document to get error message.
print(ex.read(), file=sys.stderr)
raise
printDebugMessage('restRequest', 'End', 11)
return result
# Get input parameters list
def serviceGetParameters():
printDebugMessage('serviceGetParameters', 'Begin', 1)
requestUrl = baseUrl + '/parameters'
printDebugMessage('serviceGetParameters', 'requestUrl: ' + requestUrl, 2)
xmlDoc = restRequest(requestUrl)
doc = xmltramp.parse(xmlDoc)
printDebugMessage('serviceGetParameters', 'End', 1)
return doc['id':]
# Print list of parameters
def printGetParameters():
printDebugMessage('printGetParameters', 'Begin', 1)
idList = serviceGetParameters()
for id in idList:
print(id)
printDebugMessage('printGetParameters', 'End', 1)
# Get input parameter information
def serviceGetParameterDetails(paramName):
printDebugMessage('serviceGetParameterDetails', 'Begin', 1)
printDebugMessage('serviceGetParameterDetails',
'paramName: ' + paramName, 2)
requestUrl = baseUrl + '/parameterdetails/' + paramName
printDebugMessage('serviceGetParameterDetails',
'requestUrl: ' + requestUrl, 2)
xmlDoc = restRequest(requestUrl)
doc = xmltramp.parse(xmlDoc)
printDebugMessage('serviceGetParameterDetails', 'End', 1)
return doc
# Print description of a parameter
def printGetParameterDetails(paramName):
printDebugMessage('printGetParameterDetails', 'Begin', 1)
doc = serviceGetParameterDetails(paramName)
print(str(doc.name) + "\t" + str(doc.type))
print(doc.description)
for value in doc.values:
print(value.value, end=' ')
if str(value.defaultValue) == 'true':
print('default', end=' ')
print()
print("\t" + str(value.label))
if(hasattr(value, 'properties')):
for wsProperty in value.properties:
print("\t" + str(wsProperty.key) + "\t" + str(wsProperty.value))
#print doc
printDebugMessage('printGetParameterDetails', 'End', 1)
# Submit job
def serviceRun(email, title, params):
printDebugMessage('serviceRun', 'Begin', 1)
# Insert e-mail and title into params
params['email'] = email
if title:
params['title'] = title
requestUrl = baseUrl + '/run/'
printDebugMessage('serviceRun', 'requestUrl: ' + requestUrl, 2)
# Signature methods require special handling (list)
applData = ''
if 'appl' in params:
# So extract from params
applList = params['appl']
del params['appl']
# Build the method data options
for appl in applList:
applData += '&appl=' + appl
# Get the data for the other options
requestData = urllib.parse.urlencode(params)
# Concatenate the two parts.
requestData += applData
printDebugMessage('serviceRun', 'requestData: ' + requestData, 2)
# Errors are indicated by HTTP status codes.
try:
# Set the HTTP User-agent.
user_agent = getUserAgent()
http_headers = {'User-Agent': user_agent}
req = urllib.request.Request(requestUrl, None, http_headers)
# Make the submission (HTTP POST).
reqH = urllib.request.urlopen(req, requestData.encode('utf-8')) # POST data must be bytes
jobId = reqH.read().decode('utf-8')
reqH.close()
except urllib.error.HTTPError as ex:
# Trap exception and output the document to get error message.
print(ex.read(), file=sys.stderr)
raise
printDebugMessage('serviceRun', 'jobId: ' + jobId, 2)
printDebugMessage('serviceRun', 'End', 1)
return jobId
# Get job status
def serviceGetStatus(jobId):
printDebugMessage('serviceGetStatus', 'Begin', 1)
printDebugMessage('serviceGetStatus', 'jobId: ' + jobId, 2)
requestUrl = baseUrl + '/status/' + jobId
printDebugMessage('serviceGetStatus', 'requestUrl: ' + requestUrl, 2)
status = restRequest(requestUrl)
printDebugMessage('serviceGetStatus', 'status: ' + status, 2)
printDebugMessage('serviceGetStatus', 'End', 1)
return status
# Print the status of a job
def printGetStatus(jobId):
printDebugMessage('printGetStatus', 'Begin', 1)
status = serviceGetStatus(jobId)
print(status)
printDebugMessage('printGetStatus', 'End', 1)
# Get available result types for job
def serviceGetResultTypes(jobId):
printDebugMessage('serviceGetResultTypes', 'Begin', 1)
printDebugMessage('serviceGetResultTypes', 'jobId: ' + jobId, 2)
requestUrl = baseUrl + '/resulttypes/' + jobId
printDebugMessage('serviceGetResultTypes', 'requestUrl: ' + requestUrl, 2)
xmlDoc = restRequest(requestUrl)
doc = xmltramp.parse(xmlDoc)
printDebugMessage('serviceGetResultTypes', 'End', 1)
return doc['type':]
# Print list of available result types for a job.
def printGetResultTypes(jobId):
printDebugMessage('printGetResultTypes', 'Begin', 1)
resultTypeList = serviceGetResultTypes(jobId)
for resultType in resultTypeList:
print(resultType['identifier'])
if(hasattr(resultType, 'label')):
print("\t", resultType['label'])
if(hasattr(resultType, 'description')):
print("\t", resultType['description'])
if(hasattr(resultType, 'mediaType')):
print("\t", resultType['mediaType'])
if(hasattr(resultType, 'fileSuffix')):
print("\t", resultType['fileSuffix'])
printDebugMessage('printGetResultTypes', 'End', 1)
# Get result
def serviceGetResult(jobId, type_):
printDebugMessage('serviceGetResult', 'Begin', 1)
printDebugMessage('serviceGetResult', 'jobId: ' + jobId, 2)
printDebugMessage('serviceGetResult', 'type_: ' + type_, 2)
requestUrl = baseUrl + '/result/' + jobId + '/' + type_
result = restRequest(requestUrl)
printDebugMessage('serviceGetResult', 'End', 1)
return result
# Client-side poll
def clientPoll(jobId):
printDebugMessage('clientPoll', 'Begin', 1)
result = 'PENDING'
while result == 'RUNNING' or result == 'PENDING':
result = serviceGetStatus(jobId)
print(result, file=sys.stderr)
if result == 'RUNNING' or result == 'PENDING':
time.sleep(checkInterval)
printDebugMessage('clientPoll', 'End', 1)
# Get result for a jobid
def getResult(jobId):
| printDebugMessage('getResult', 'Begin', 1)
printDebugMessage('getResult', 'jobId: ' + jobId, 1)
# Check status and wait if necessary
clientPoll(jobId)
# Get available result types
resultTypes = serviceGetResultTypes(jobId)
for resultType in resultTypes:
# Derive the filename for the result
if options.outfile:
filename = options.outfile + '.' + \
str(resultType['identifier']) + '.' + \
str(resultType['fileSuffix'])
else:
filename = jobId + '.' + \
str(resultType['identifier']) + '.' + \
str(resultType['fileSuffix'])
# Write a result file
if not options.outformat or options.outformat == str(resultType['identifier']):
# Get the result
result = serviceGetResult(jobId, str(resultType['identifier'])) | identifier_body |
|
runIPRscan.py | debug output level')
options = parser.parse_args()
# Increase output level
if options.verbose:
outputLevel += 1
# Decrease output level
if options.quiet:
outputLevel -= 1
# Debug level
if options.debugLevel:
debugLevel = options.debugLevel
# Debug print
def printDebugMessage(functionName, message, level):
if(level <= debugLevel):
print('[' + functionName + '] ' + message, file=sys.stderr)
# User-agent for request (see RFC2616).
def getUserAgent():
printDebugMessage('getUserAgent', 'Begin', 11)
# Agent string for urllib2 library.
urllib_agent = 'Python-urllib/%s' % urllib.request.__version__
clientRevision = '$Revision: 2809 $'
clientVersion = '0'
if len(clientRevision) > 11:
clientVersion = clientRevision[11:-2]
# Prepend client specific agent string.
user_agent = 'EBI-Sample-Client/%s (%s; Python %s; %s) %s' % (
clientVersion, os.path.basename(__file__),
platform.python_version(), platform.system(),
urllib_agent
)
printDebugMessage('getUserAgent', 'user_agent: ' + user_agent, 12)
printDebugMessage('getUserAgent', 'End', 11)
return user_agent
# Wrapper for a REST (HTTP GET) request
def restRequest(url):
printDebugMessage('restRequest', 'Begin', 11)
printDebugMessage('restRequest', 'url: ' + url, 11)
# Errors are indicated by HTTP status codes.
try:
# Set the User-agent.
user_agent = getUserAgent()
http_headers = {'User-Agent': user_agent}
req = urllib.request.Request(url, None, http_headers)
# Make the request (HTTP GET).
reqH = urllib.request.urlopen(req)
result = reqH.read()
reqH.close()
# Errors are indicated by HTTP status codes.
except urllib.error.HTTPError as ex:
# Trap exception and output the document to get error message.
print(ex.read(), file=sys.stderr)
raise
printDebugMessage('restRequest', 'End', 11)
return result
# Get input parameters list
def serviceGetParameters():
printDebugMessage('serviceGetParameters', 'Begin', 1)
requestUrl = baseUrl + '/parameters'
printDebugMessage('serviceGetParameters', 'requestUrl: ' + requestUrl, 2)
xmlDoc = restRequest(requestUrl)
doc = xmltramp.parse(xmlDoc)
printDebugMessage('serviceGetParameters', 'End', 1)
return doc['id':]
# Print list of parameters
def printGetParameters():
printDebugMessage('printGetParameters', 'Begin', 1)
idList = serviceGetParameters()
for id in idList:
print(id)
printDebugMessage('printGetParameters', 'End', 1)
# Get input parameter information
def serviceGetParameterDetails(paramName):
printDebugMessage('serviceGetParameterDetails', 'Begin', 1)
printDebugMessage('serviceGetParameterDetails',
'paramName: ' + paramName, 2)
requestUrl = baseUrl + '/parameterdetails/' + paramName
printDebugMessage('serviceGetParameterDetails',
'requestUrl: ' + requestUrl, 2)
xmlDoc = restRequest(requestUrl)
doc = xmltramp.parse(xmlDoc)
printDebugMessage('serviceGetParameterDetails', 'End', 1)
return doc
# Print description of a parameter
def printGetParameterDetails(paramName):
printDebugMessage('printGetParameterDetails', 'Begin', 1)
doc = serviceGetParameterDetails(paramName)
print(str(doc.name) + "\t" + str(doc.type))
print(doc.description)
for value in doc.values:
print(value.value, end=' ')
if str(value.defaultValue) == 'true':
print('default', end=' ')
print()
print("\t" + str(value.label))
if(hasattr(value, 'properties')):
for wsProperty in value.properties:
print("\t" + str(wsProperty.key) + "\t" + str(wsProperty.value))
#print doc
printDebugMessage('printGetParameterDetails', 'End', 1)
# Submit job
def serviceRun(email, title, params):
printDebugMessage('serviceRun', 'Begin', 1)
# Insert e-mail and title into params
params['email'] = email
if title:
params['title'] = title
requestUrl = baseUrl + '/run/'
printDebugMessage('serviceRun', 'requestUrl: ' + requestUrl, 2)
# Signature methods require special handling (list)
applData = ''
if 'appl' in params:
# So extract from params
applList = params['appl']
del params['appl']
# Build the method data options
for appl in applList:
applData += '&appl=' + appl
# Get the data for the other options
requestData = urllib.parse.urlencode(params)
# Concatenate the two parts.
requestData += applData
printDebugMessage('serviceRun', 'requestData: ' + requestData, 2)
# Errors are indicated by HTTP status codes.
try:
# Set the HTTP User-agent.
user_agent = getUserAgent()
http_headers = {'User-Agent': user_agent}
req = urllib.request.Request(requestUrl, None, http_headers)
# Make the submission (HTTP POST).
reqH = urllib.request.urlopen(req, requestData.encode('utf-8')) # POST data must be bytes
jobId = reqH.read().decode('utf-8')
reqH.close()
except urllib.error.HTTPError as ex:
# Trap exception and output the document to get error message.
print(ex.read(), file=sys.stderr)
raise
printDebugMessage('serviceRun', 'jobId: ' + jobId, 2)
printDebugMessage('serviceRun', 'End', 1)
return jobId
# Get job status
def serviceGetStatus(jobId):
printDebugMessage('serviceGetStatus', 'Begin', 1)
printDebugMessage('serviceGetStatus', 'jobId: ' + jobId, 2)
requestUrl = baseUrl + '/status/' + jobId
printDebugMessage('serviceGetStatus', 'requestUrl: ' + requestUrl, 2)
status = restRequest(requestUrl)
printDebugMessage('serviceGetStatus', 'status: ' + status, 2)
printDebugMessage('serviceGetStatus', 'End', 1)
return status
# Print the status of a job
def printGetStatus(jobId):
printDebugMessage('printGetStatus', 'Begin', 1)
status = serviceGetStatus(jobId)
print(status)
printDebugMessage('printGetStatus', 'End', 1)
# Get available result types for job
def serviceGetResultTypes(jobId):
printDebugMessage('serviceGetResultTypes', 'Begin', 1)
printDebugMessage('serviceGetResultTypes', 'jobId: ' + jobId, 2)
requestUrl = baseUrl + '/resulttypes/' + jobId
printDebugMessage('serviceGetResultTypes', 'requestUrl: ' + requestUrl, 2)
xmlDoc = restRequest(requestUrl)
doc = xmltramp.parse(xmlDoc)
printDebugMessage('serviceGetResultTypes', 'End', 1)
return doc['type':]
# Print list of available result types for a job.
def printGetResultTypes(jobId):
printDebugMessage('printGetResultTypes', 'Begin', 1)
resultTypeList = serviceGetResultTypes(jobId)
for resultType in resultTypeList:
print(resultType['identifier'])
if(hasattr(resultType, 'label')):
print("\t", resultType['label'])
if(hasattr(resultType, 'description')):
print("\t", resultType['description'])
if(hasattr(resultType, 'mediaType')):
print("\t", resultType['mediaType'])
if(hasattr(resultType, 'fileSuffix')):
print("\t", resultType['fileSuffix'])
printDebugMessage('printGetResultTypes', 'End', 1)
# Get result
def serviceGetResult(jobId, type_):
printDebugMessage('serviceGetResult', 'Begin', 1)
printDebugMessage('serviceGetResult', 'jobId: ' + jobId, 2)
printDebugMessage('serviceGetResult', 'type_: ' + type_, 2)
requestUrl = baseUrl + '/result/' + jobId + '/' + type_
result = restRequest(requestUrl)
printDebugMessage('serviceGetResult', 'End', 1)
return result
# Client-side poll
def clientPoll(jobId):
printDebugMessage('clientPoll', 'Begin', 1)
result = 'PENDING'
while result == 'RUNNING' or result == 'PENDING':
result = serviceGetStatus(jobId)
print(result, file=sys.stderr)
if result == 'RUNNING' or result == 'PENDING':
time.sleep(checkInterval)
printDebugMessage('clientPoll', 'End', 1)
# Get result for a jobid
|
def getResult(jobId):
printDebugMessage('getResult', 'Begin', 1)
printDebugMessage('getResult', 'jobId: ' + jobId, 1)
# Check status and wait if necessary | random_line_split |
|
main.py |
itemToStore = arg.lower()
# get a list of all "description words" for each item in the inventory
invDescWords = getAllDescWords(inventory)
# Nice little easter egg :)
if itemToStore == 'troll in bag':
print(bcolors.start + "You cannot put troll in bag, troll is a creature." + bcolors.end)
return
# find out if the player doesn't have that item
if itemToStore not in invDescWords:
print('%s does not exist in your inventory, the ground, Africa or your pockets, what a shame.' % (itemToStore))
return
# get the item name that the player's command describes
try:
item = getFirstItemMatchingDesc(itemToStore, inventory)
# broken currently, needs some work doing to check if the STORAGE value exists in the current room then store the item.
if item != None:
print('You store %s in a safe place.' % (worldItems[item][SHORTDESC]))
inventory.remove(item)
worldRooms[location][STORAGE].append(item)
except KeyError:
return("Don't even think about it buster brown.")
#item = getFirstItemMatchingDesc(itemToStore, inventory)
#if item != None:
# print('You store %s in a safe place.' % (worldItems[item][SHORTDESC]))
# inventory.remove(item) # remove from inventory
# worldRooms[location][STORAGE].append(item) # add to the container
def complete_take(self, text, line, begidx, endidx):
possibleItems = []
text = text.lower()
# if the user has only typed "take" but no item name:
if not text:
return getAllFirstDescWords(worldRooms[location][GROUND])
# otherwise, get a list of all "description words" for ground items matching the command text so far:
for item in list(set(worldRooms[location][GROUND])):
for descWord in worldItems[item][DESCWORDS]:
if descWord.startswith(text) and worldItems[item].get(TAKEABLE, True):
possibleItems.append(descWord)
return list(set(possibleItems)) # make list unique
def complete_drop(self, text, line, begidx, endidx):
possibleItems = []
itemToDrop = text.lower()
# get a list of all "description words" for each item in the inventory
invDescWords = getAllDescWords(inventory)
for descWord in invDescWords:
if line.startswith('drop %s' % (descWord)):
return [] # command is complete
# if the user has only typed "drop" but no item name:
if itemToDrop == '':
return getAllFirstDescWords(inventory)
# otherwise, get a list of all "description words" for inventory items matching the command text so far:
for descWord in invDescWords:
if descWord.startswith(text):
possibleItems.append(descWord)
return list(set(possibleItems)) # make list unique
def do_look(self, arg):
"""Look at an item, direction, or the area:
"look" - display the current area's description
"look <direction>" - display the description of the area in that direction
"look exits" - display the description of all adjacent areas
"look <item>" - display the description of an item on the ground in storage or in your inventory"""
lookingAt = arg.lower()
if lookingAt == '':
# "look" will re-print the area description
displayLocation(location, default)
return
if lookingAt == 'exits':
for direction in (NORTH, SOUTH, EAST, WEST, UP, DOWN):
if direction in worldRooms[location]:
print('%s: %s' % (direction.title(), worldRooms[location][direction]))
return
if lookingAt in ('north', 'west', 'east', 'south', 'up', 'down', 'n', 'w', 'e', 's', 'u', 'd'):
if lookingAt.startswith('n') and NORTH in worldRooms[location]:
print(worldRooms[location][NORTH])
elif lookingAt.startswith('w') and WEST in worldRooms[location]:
print(worldRooms[location][WEST])
elif lookingAt.startswith('e') and EAST in worldRooms[location]:
print(worldRooms[location][EAST])
elif lookingAt.startswith('s') and SOUTH in worldRooms[location]:
print(worldRooms[location][SOUTH])
elif lookingAt.startswith('u') and UP in worldRooms[location]:
print(worldRooms[location][UP])
elif lookingAt.startswith('d') and DOWN in worldRooms[location]:
print(worldRooms[location][DOWN])
else:
print('There is nothing in that direction.')
return
# see if the item being looked at is on the ground at this location or in storage.
#item = getFirstItemMatchingDesc(lookingAt, worldRooms[location][GROUND][STORAGE])
item = getFirstItemMatchingDesc(lookingAt, worldRooms[location][GROUND])
if item != None:
print('\n'.join(textwrap.wrap(worldItems[item][LONGDESC], SCREEN_WIDTH)))
return
# see if the item being looked at is in the inventory
item = getFirstItemMatchingDesc(lookingAt, inventory)
if item != None:
print('\n'.join(textwrap.wrap(worldItems[item][LONGDESC], SCREEN_WIDTH)))
return
print('You do not see that nearby.')
def complete_look(self, text, line, begidx, endidx):
possibleItems = []
lookingAt = text.lower()
# get a list of all "description words" for each item in the inventory
invDescWords = getAllDescWords(inventory)
groundDescWords = getAllDescWords(worldRooms[location][GROUND])
for descWord in invDescWords + groundDescWords + [NORTH, SOUTH, EAST, WEST, UP, DOWN]:
if line.startswith('look %s' % (descWord)):
return [] # command is complete
# if the user has only typed "look" but no item name, show all items on ground and directions:
if lookingAt == '':
possibleItems.extend(getAllFirstDescWords(worldRooms[location][GROUND]))
for direction in (NORTH, SOUTH, EAST, WEST, UP, DOWN):
if direction in worldRooms[location]:
possibleItems.append(direction)
return list(set(possibleItems)) # make list unique
# otherwise, get a list of all "description words" for ground items matching the command text so far:
for descWord in groundDescWords:
if descWord.startswith(lookingAt):
possibleItems.append(descWord)
# check for matching directions
for direction in (NORTH, SOUTH, EAST, WEST, UP, DOWN):
if direction.startswith(lookingAt):
possibleItems.append(direction)
# get a list of all "description words" for inventory items matching the command text so far:
for descWord in invDescWords:
if descWord.startswith(lookingAt):
possibleItems.append(descWord)
return list(set(possibleItems)) # make list unique
#arg = arg.lower()
# Extra ways of writing commands
do_read = do_look
do_l = do_look
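# Assigning the existing method object under another do_* name gives the command an alias,
# so "read <item>" and "l <item>" both dispatch to do_look.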
def do_eat(self, arg):
""""eat <item>" - eat an item in your inventory."""
itemToEat = arg.lower()
if itemToEat == '':
print('Eat what? Type "inventory" or "inv" to see what is in your inventory.')
return
cantEat = False
for item in getAllItemsMatchingDesc(itemToEat, inventory):
if worldItems[item].get(EDIBLE, False) == False:
cantEat = True
continue # there may be other items named this that you can eat, so we continue checking
# NOTE - If you wanted to implement hunger levels, here is where
# you would add code that changes the player's hunger level.
print('You eat %s; may your bowels forever question your terrible choices.' % (worldItems[item][SHORTDESC]))
inventory.remove(item)
return
if cantEat:
print('I don\'t think the "%s" would like you to do that...' % (worldItems[item][SHORTDESC]))
else:
print('You do not have "%s". Type "inventory" or "inv" to see what in your inventory.' % (itemToEat))
def complete_eat(self, text, line, begidx, endidx):
itemToEat = text.lower()
possibleItems = []
# if the user has only typed "eat" but no item name:
if itemToEat == '':
return getAllFirstDescWords(inventory)
# otherwise, get a list of all "description words" for edible inventory items matching the command text so far:
for item in list(set(inventory)):
for descWord in worldItems[item][DESCWORDS]:
if descWord.startswith(text) and worldItems[item].get(EDIBLE, False):
possibleItems.append(descWord)
return list(set(possibleItems)) # make list unique
do_exit = do_quit # another way of exiting the game with a different word
def | do_clear | identifier_name |
|
main.py | arg):
"""Go to the area downwards, if possible."""
moveDirection('down')
# Since the code is the exact same, we can just copy the
# methods with shortened names:
do_n = do_north
do_s = do_south
do_e = do_east
do_w = do_west
do_u = do_up
do_d = do_down
def do_exits(self, arg):
"""Toggle showing full exit descriptions or brief exit descriptions."""
global showFullExits
showFullExits = not showFullExits
if showFullExits:
print('Showing full exit descriptions.')
else:
print('Showing brief exit descriptions.')
def do_inventory(self, arg):
"""Display a list of the items in your possession."""
if len(inventory) == 0:
print('Inventory:\n (nothing)')
return
# first get a count of each distinct item in the inventory
itemCount = {}
for item in inventory:
if item in itemCount.keys():
itemCount[item] += 1
else:
itemCount[item] = 1
# get a list of inventory items with duplicates removed:
print('Inventory:')
for item in set(inventory):
if itemCount[item] > 1:
print(' %s (%s)' % (item, itemCount[item]))
else:
print(' ' + item)
do_inv = do_inventory
do_i = do_inventory
def do_take(self, arg):
""""take <item> - Take an item on the ground."""
# put this value in a more suitably named variable
itemToTake = arg.lower()
if itemToTake == '':
print('Take what? Type "look" to see the items on the ground here.')
return
cantTake = False
# get the item name that the player's command describes
for item in getAllItemsMatchingDesc(itemToTake, worldRooms[location][GROUND]):
if worldItems[item].get(TAKEABLE, True) == False:
cantTake = True
continue # there may be other items named this that you can take, so we continue checking
print("Taken.")
worldRooms[location][GROUND].remove(item) # remove from the ground
inventory.append(item) # add to inventory
return
# something funny
if itemToTake == 'chest':
print(bcolors.start + "Your feeble arms buckle under the weight of the enormous chest, nice try you theiving git." + bcolors.end)
return
if cantTake:
print('You cannot take "%s".' % (itemToTake))
else:
print('That is not in or around the area, maybe it was your imagination?')
def do_use(self, arg):
""""use <item> - Use an item in in your inventory."""
itemToUse = arg.lower()
if itemToUse == '':
print('Use what? Type "inv" to see the items in your invetory.')
return
cantUse = False
#look up the item the player describes
invDescWords = getAllDescWords(inventory)
if itemToUse not in invDescWords:
print('You do not have that item to use.')
return
for item in getAllItemsMatchingDesc(itemToUse, inventory):
if worldItems[item].get(USEABLE, True) == False:
cantUse = True
continue
print('%s' % (worldItems[item][USEDESCTRUE]))
#print('You use %s' % (worldItems[item][SHORTDESC]))
#inventory.remove(item)
return
if cantUse:
print('You cannot use "%s".' % (itemToUse))
else:
print('You do not have that item to use.')
def do_drop(self, arg):
""""drop <item> - Drop an item from your inventory onto the ground."""
# put this value in a more suitably named variable
itemToDrop = arg.lower()
# get a list of all "description words" for each item in the inventory
invDescWords = getAllDescWords(inventory)
# find out if the player doesn't have that item
if itemToDrop not in invDescWords:
print('You do not have "%s" in your inventory.' % (itemToDrop))
return
# get the item name that the player's command describes
item = getFirstItemMatchingDesc(itemToDrop, inventory)
if item != None:
print('You drop %s.' % (worldItems[item][SHORTDESC]))
inventory.remove(item) # remove from inventory
worldRooms[location][GROUND].append(item) # add to the ground
# put items in a item container
def do_put(self, arg):
""""put <item> in <item> - Puts an item in a container."""
# put this value in a more suitably named variable
itemToStore = arg.lower()
# get a list of all "description words" for each item in the inventory
invDescWords = getAllDescWords(inventory)
# Nice little easter egg :)
if itemToStore == 'troll in bag':
print(bcolors.start + "You cannot put troll in bag, troll is a creature." + bcolors.end)
return
# find out if the player doesn't have that item
if itemToStore not in invDescWords:
print('You want to put "%s" in what?!' % (itemToStore))
return
# get the item name that the player's command describes
item = getFirstItemMatchingDesc(itemToStore, inventory)
if item != None:
print('You put %s in the container.' % (worldItems[item][SHORTDESC]))
inventory.remove(item) # remove from inventory
worldRooms[location][ITEMINV].append(item) # add to the container
def do_store(self, arg):
""""store <item> - Stores an item in a safe place, assuming that the room has a storage area."""
# put this value in a more suitably named variable
itemToStore = arg.lower()
# get a list of all "description words" for each item in the inventory
invDescWords = getAllDescWords(inventory)
# Nice little easter egg :)
if itemToStore == 'troll in bag':
print(bcolors.start + "You cannot put troll in bag, troll is a creature." + bcolors.end)
return
# find out if the player doesn't have that item
if itemToStore not in invDescWords:
print('%s does not exist in your inventory, the ground, Africa, or your pockets, what a shame.' % (itemToStore))
return
# get the item name that the player's command describes
try:
item = getFirstItemMatchingDesc(itemToStore, inventory)
# broken currently, needs some work doing to check if the STORAGE value exists in the current room then store the item.
if item != None:
print('You store %s in a safe place.' % (worldItems[item][SHORTDESC]))
inventory.remove(item)
worldRooms[location][STORAGE].append(item)
except KeyError:
return("Don't even think about it buster brown.")
#item = getFirstItemMatchingDesc(itemToStore, inventory)
#if item != None:
# print('You store %s in a safe place.' % (worldItems[item][SHORTDESC]))
# inventory.remove(item) # remove from inventory
# worldRooms[location][STORAGE].append(item) # add to the container
def complete_take(self, text, line, begidx, endidx):
possibleItems = []
text = text.lower()
# if the user has only typed "take" but no item name:
if not text:
return getAllFirstDescWords(worldRooms[location][GROUND])
# otherwise, get a list of all "description words" for ground items matching the command text so far:
for item in list(set(worldRooms[location][GROUND])):
for descWord in worldItems[item][DESCWORDS]:
if descWord.startswith(text) and worldItems[item].get(TAKEABLE, True):
possibleItems.append(descWord)
return list(set(possibleItems)) # make list unique
def complete_drop(self, text, line, begidx, endidx):
| possibleItems = []
itemToDrop = text.lower()
# get a list of all "description words" for each item in the inventory
invDescWords = getAllDescWords(inventory)
for descWord in invDescWords:
if line.startswith('drop %s' % (descWord)):
return [] # command is complete
# if the user has only typed "drop" but no item name:
if itemToDrop == '':
return getAllFirstDescWords(inventory)
# otherwise, get a list of all "description words" for inventory items matching the command text so far:
for descWord in invDescWords:
if descWord.startswith(text):
possibleItems.append(descWord)
return list(set(possibleItems)) # make list unique
| identifier_body |
|
main.py | Books',
LONGDESC: 'A pile of dusty old books pages half rotting away, its hard to make out what is written in them, Hitchickers Guide to the Galaxy, How to stew a ham in 43 different ways and various other, written, human detritus.',
EDIBLE: False,
USEABLE: False,
DESCWORDS: ['books','book'],
STORAGEDESC: '[Dusty Books] The books lie at the bottom of the chest looking miserable.'
},
'Gun': {
GROUNDDESC: 'A gun lies on the floor here.',
SHORTDESC: 'Gun',
LONGDESC: 'A .32 ACP revolver; it has 5 chambers, and one of the cartridges has been fired.',
EDIBLE: False,
USEABLE: True,
DESCWORDS: ['Gun','gun','revolver'],
STORAGEDESC: '[Gun] Better the gun be in here then in my hands..',
},
'Sack': {
GROUNDDESC: 'A sack of burlap lies on the floor here',
SHORTDESC: 'Sack',
LONGDESC: "It's an old sack used for storing things in; it smells like onions.",
EDIBLE: False,
DESCWORDS: ['Sack', 'bag', 'sack'],
STORAGEDESC: "[Sack] A container within a container, it's like that terrible movie with Leonardo DiCaprio...",
# Attempting "Items within Items"
ITEMINV: ['Lunch'],
},
'Chest' : {
SHORTDESC: 'A wooden chest',
GROUNDDESC: 'A wooden chest resides in the far corner of this room with an incription on it.',
LONGDESC: 'It\'s an old wooden chest with the inscription "Por viaj malmolaj gajnitaj eroj." The language begins with an Esp... you know that much.',
EDIBLE: False,
TAKEABLE: False,
USEABLE: False,
DESCWORDS: ['Chest', 'Box', 'Crate', 'chest', 'box', 'crate'],
},
'Troll' : {
SHORTDESC: 'A troll figure',
GROUNDDESC : 'A troll is somewhere around here.',
LONGDESC : 'A small troll figure carved from wood. You turn it over in your hands; an inscription on the base reads "RIP Inbag the Troll.", and a disembodied Scottish voice tells you not to put it in your bag.',
EDIBLE : False,
USEABLE: False,
TAKEABLE : False,
DESCWORDS : ['Troll', 'troll', 'figure', 'statue'],
STORAGEDESC : "[Troll] The troll lies disgruntled in the chest; it's dark in there, it might be eaten by a grue."
},
'Cassette' : {
SHORTDESC: 'A cassette tape',
GROUNDDESC: 'A cassette tape lies here on the floor, someone must have "dropped the bass".',
LONGDESC: 'You turn the cassette tape over in your hands; the label reads "Best of the 60s". It possibly contains Fleetwood Mac and thus must be destroyed immediately.',
EDIBLE: False,
USEABLE: False,
TAKEABLE: True,
DESCWORDS: ['tape', 'cassette', 'music tape', 'music'],
STORAGEDESC : '[Tape] A tape lies in the bottom of the chest, we would have prefered you to burn it but this choice is yours.',
},
'Weevels' : {
SHORTDESC: 'A pile of dead weevels',
GROUNDDESC : 'A pile of rotting weevels lies on the ground.',
LONGDESC : "It's a pile of fucking rotting weevels.",
EDIBLE : True,
USEABLE : False,
TAKEABLE : True,
DESCWORDS: ['weevels', 'pile of weevels', 'rotting weevels'],
STORAGEDESC : '[Weevels] A pile of rotting weevel husks lie at the bottom of the chest.'
}
}
global default
default = ""
def displayLocation(loc, default):
"""A helper function for displaying an area's description and exits."""
# Print the room name.
print(bcolors.start + loc + bcolors.end)
print('=' * len(loc))
# Print the room's description (using textwrap.wrap())
print('\n'.join(textwrap.wrap(worldRooms[loc][DESC], SCREEN_WIDTH)))
# Print all the items on the ground.
if len(worldRooms[loc][GROUND]) > 0:
print("")
for item in worldRooms[loc][GROUND]:
print(worldItems[item][GROUNDDESC])
try:
# Check storage exists
if len(worldRooms[loc][STORAGE]) > 0:
print(bcolors.start + "The treasures you have accrewed thus far are (Chest) :" + bcolors.end)
for item in worldRooms[loc][STORAGE]:
print (worldItems[item][STORAGEDESC])
except KeyError:
return default
# Print all the exits.
exits = []
for direction in (NORTH, SOUTH, EAST, WEST, UP, DOWN):
if direction in worldRooms[loc].keys():
exits.append(direction.title())
print("")
if showFullExits:
for direction in (NORTH, SOUTH, EAST, WEST, UP, DOWN):
if direction in worldRooms[location]:
print('%s: %s' % (direction.title(), worldRooms[location][direction]))
else:
print('Exits: %s' % ' '.join(exits))
def moveDirection(direction):
"""A helper function that changes the location of the player."""
global location
if direction in worldRooms[location]:
print('You move to the %s.' % direction)
location = worldRooms[location][direction]
displayLocation(location, default)
else:
print('You cannot move in that direction')
def getAllDescWords(itemList):
"""Returns a list of "description words" for each item named in itemList."""
itemList = list(set(itemList)) # make itemList unique
descWords = []
for item in itemList:
descWords.extend(worldItems[item][DESCWORDS])
return list(set(descWords))
def getAllFirstDescWords(itemList):
"""Returns a list of the first "description word" in the list of
description words for each item named in itemList."""
itemList = list(set(itemList)) # make itemList unique
descWords = []
for item in itemList:
descWords.append(worldItems[item][DESCWORDS][0])
return list(set(descWords))
def getFirstItemMatchingDesc(desc, itemList):
itemList = list(set(itemList)) # make itemList unique
for item in itemList:
if desc in worldItems[item][DESCWORDS]:
return item
return None
def getAllItemsMatchingDesc(desc, itemList):
itemList = list(set(itemList)) # make itemList unique
matchingItems = []
for item in itemList:
if desc in worldItems[item][DESCWORDS]:
matchingItems.append(item)
return matchingItems
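# Example using the items defined above: getAllItemsMatchingDesc('book', ['Books', 'Gun'])
# returns ['Books'], since 'book' appears in the Books entry's DESCWORDS list and the Gun entry's does not.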
class TextAdventureCmd(cmd.Cmd):
prompt = '\n> '
# The default() method is called when none of the other do_*() command methods match.
def default(self, arg):
print('I do not understand that command. Type ' + bcolors.start + '"help"' + bcolors.end + ' for a list of commands.')
# A very simple "quit" command to terminate the program:
def do_quit(self, arg):
"""Quit the game."""
return True # this exits the Cmd application loop in TextAdventureCmd.cmdloop()
def help_combat(self):
print('Combat is not implemented in this program.')
# These direction commands have a long (i.e. north) and show (i.e. n) form.
# Since the code is basically the same, I put it in the moveDirection()
# function.
def do_north(self, arg):
"""Go to the area to the north, if possible."""
moveDirection('north')
def do_south(self, arg):
"""Go to the area to the south, if possible."""
moveDirection('south')
def do_east(self, arg):
"""Go to the area to the east, if possible."""
moveDirection('east')
def do_west(self, arg):
"""Go to the area to the west, if possible."""
moveDirection('west')
def do_up(self, arg):
"""Go to the area upwards, if possible."""
moveDirection('up')
def do_down(self, arg):
"""Go to the area downwards, if possible."""
moveDirection('down')
# Since the code is the exact same, we can just copy the
# methods with shortened names:
do_n = do_north
do_s = do_south
do_e = do_east
do_w = do_west
do_u = do_up
do_d = do_down
def do_exits(self, arg):
"""Toggle showing full exit descriptions or brief exit descriptions."""
global showFullExits
showFullExits = not showFullExits
if showFullExits:
print('Showing full exit descriptions.')
else:
| print('Showing brief exit descriptions.')
| random_line_split |
|
main.py | chest':
print(bcolors.start + "Your feeble arms buckle under the weight of the enormous chest, nice try you theiving git." + bcolors.end)
return
if cantTake:
print('You cannot take "%s".' % (itemToTake))
else:
print('That is not in or around the area, maybe it was your imagination?')
def do_use(self, arg):
""""use <item> - Use an item in in your inventory."""
itemToUse = arg.lower()
if itemToUse == '':
print('Use what? Type "inv" to see the items in your invetory.')
return
cantUse = False
#look up the item the player describes
invDescWords = getAllDescWords(inventory)
if itemToUse not in invDescWords:
print('You do not have that item to use.')
return
for item in getAllItemsMatchingDesc(itemToUse, inventory):
if worldItems[item].get(USEABLE, True) == False:
cantUse = True
continue
print('%s' % (worldItems[item][USEDESCTRUE]))
#print('You use %s' % (worldItems[item][SHORTDESC]))
#inventory.remove(item)
return
if cantUse:
print('You cannot use "%s".' % (itemToUse))
else:
print('You do not have that item to use.')
def do_drop(self, arg):
""""drop <item> - Drop an item from your inventory onto the ground."""
# put this value in a more suitably named variable
itemToDrop = arg.lower()
# get a list of all "description words" for each item in the inventory
invDescWords = getAllDescWords(inventory)
# find out if the player doesn't have that item
if itemToDrop not in invDescWords:
print('You do not have "%s" in your inventory.' % (itemToDrop))
return
# get the item name that the player's command describes
item = getFirstItemMatchingDesc(itemToDrop, inventory)
if item != None:
print('You drop %s.' % (worldItems[item][SHORTDESC]))
inventory.remove(item) # remove from inventory
worldRooms[location][GROUND].append(item) # add to the ground
# put items in a item container
def do_put(self, arg):
""""put <item> in <item> - Puts an item in a container."""
# put this value in a more suitably named variable
itemToStore = arg.lower()
# get a list of all "description words" for each item in the inventory
invDescWords = getAllDescWords(inventory)
# Nice little easter egg :)
if itemToStore == 'troll in bag':
print(bcolors.start + "You cannot put troll in bag, troll is a creature." + bcolors.end)
return
# find out if the player doesn't have that item
if itemToStore not in invDescWords:
print('You want to put "%s" in what?!' % (itemToStore))
return
# get the item name that the player's command describes
item = getFirstItemMatchingDesc(itemToStore, inventory)
if item != None:
print('You put %s in the container.' % (worldItems[item][SHORTDESC]))
inventory.remove(item) # remove from inventory
worldRooms[location][ITEMINV].append(item) # add to the container
def do_store(self, arg):
""""store <item> - Stores an item in a safe place, assuming that the room has a storage area."""
# put this value in a more suitably named variable
itemToStore = arg.lower()
# get a list of all "description words" for each item in the inventory
invDescWords = getAllDescWords(inventory)
# Nice little easter egg :)
if itemToStore == 'troll in bag':
print(bcolors.start + "You cannot put troll in bag, troll is a creature." + bcolors.end)
return
# find out if the player doesn't have that item
if itemToStore not in invDescWords:
print('%s does not exist in your inventory, the ground, Africa, or your pockets, what a shame.' % (itemToStore))
return
# get the item name that the player's command describes
try:
item = getFirstItemMatchingDesc(itemToStore, inventory)
# broken currently, needs some work doing to check if the STORAGE value exists in the current room then store the item.
if item != None:
print('You store %s in a safe place.' % (worldItems[item][SHORTDESC]))
inventory.remove(item)
worldRooms[location][STORAGE].append(item)
except KeyError:
return("Don't even think about it buster brown.")
#item = getFirstItemMatchingDesc(itemToStore, inventory)
#if item != None:
# print('You store %s in a safe place.' % (worldItems[item][SHORTDESC]))
# inventory.remove(item) # remove from inventory
# worldRooms[location][STORAGE].append(item) # add to the container
def complete_take(self, text, line, begidx, endidx):
possibleItems = []
text = text.lower()
# if the user has only typed "take" but no item name:
if not text:
return getAllFirstDescWords(worldRooms[location][GROUND])
# otherwise, get a list of all "description words" for ground items matching the command text so far:
for item in list(set(worldRooms[location][GROUND])):
for descWord in worldItems[item][DESCWORDS]:
if descWord.startswith(text) and worldItems[item].get(TAKEABLE, True):
possibleItems.append(descWord)
return list(set(possibleItems)) # make list unique
def complete_drop(self, text, line, begidx, endidx):
possibleItems = []
itemToDrop = text.lower()
# get a list of all "description words" for each item in the inventory
invDescWords = getAllDescWords(inventory)
for descWord in invDescWords:
if line.startswith('drop %s' % (descWord)):
return [] # command is complete
# if the user has only typed "drop" but no item name:
if itemToDrop == '':
return getAllFirstDescWords(inventory)
# otherwise, get a list of all "description words" for inventory items matching the command text so far:
for descWord in invDescWords:
if descWord.startswith(text):
possibleItems.append(descWord)
return list(set(possibleItems)) # make list unique
def do_look(self, arg):
"""Look at an item, direction, or the area:
"look" - display the current area's description
"look <direction>" - display the description of the area in that direction
"look exits" - display the description of all adjacent areas
"look <item>" - display the description of an item on the ground in storage or in your inventory"""
lookingAt = arg.lower()
if lookingAt == '':
# "look" will re-print the area description
displayLocation(location, default)
return
if lookingAt == 'exits':
for direction in (NORTH, SOUTH, EAST, WEST, UP, DOWN):
if direction in worldRooms[location]:
print('%s: %s' % (direction.title(), worldRooms[location][direction]))
return
if lookingAt in ('north', 'west', 'east', 'south', 'up', 'down', 'n', 'w', 'e', 's', 'u', 'd'):
if lookingAt.startswith('n') and NORTH in worldRooms[location]:
print(worldRooms[location][NORTH])
elif lookingAt.startswith('w') and WEST in worldRooms[location]:
print(worldRooms[location][WEST])
elif lookingAt.startswith('e') and EAST in worldRooms[location]:
print(worldRooms[location][EAST])
elif lookingAt.startswith('s') and SOUTH in worldRooms[location]:
print(worldRooms[location][SOUTH])
elif lookingAt.startswith('u') and UP in worldRooms[location]:
print(worldRooms[location][UP])
elif lookingAt.startswith('d') and DOWN in worldRooms[location]:
print(worldRooms[location][DOWN])
else:
print('There is nothing in that direction.')
return
# see if the item being looked at is on the ground at this location or in storage.
#item = getFirstItemMatchingDesc(lookingAt, worldRooms[location][GROUND][STORAGE])
item = getFirstItemMatchingDesc(lookingAt, worldRooms[location][GROUND])
if item != None:
print('\n'.join(textwrap.wrap(worldItems[item][LONGDESC], SCREEN_WIDTH)))
return
# see if the item being looked at is in the inventory
item = getFirstItemMatchingDesc(lookingAt, inventory)
if item != None:
| print('\n'.join(textwrap.wrap(worldItems[item][LONGDESC], SCREEN_WIDTH)))
return | conditional_block |
|
render_global.rs | FramebufferAttachment::from_new_texture(AttachmentPoint::Color(2), ImageFormat::get(gl::RGB8)));
// fbo.add_attachment(FramebufferAttachment::from_new_texture(AttachmentPoint::Color(1), ImageFormat::get(gl::RGBA8)));
// fbo.add_attachment(FramebufferAttachment::from_new_texture(AttachmentPoint::Color(1), ImageFormat::get(gl::RG16_SNORM)));
fbo.allocate();
fbo
})));
}
// Reconfigure subsystems
self.separable_sss_system.reconfigure(event);
// Drop config for now
drop(config);
// Create query object
if self.frametime_query_object_gl == 0 {
self.frametime_query_object_gl = unsafe {
let mut query: gl::uint = 0;
gl::CreateQueries(gl::TIME_ELAPSED, 1, &mut query);
query
};
}
// Load shaders
self.reload_shaders();
Ok(())
}
fn reload_shaders(&mut self) {
// let asset_folder = demo::demo_instance().asset_folder.as_mut().unwrap();
// Log
println!("Reloading shaders!");
// Reload shaders from asset
self.program_ehaa_scene.reload_from_asset().expect("Failed to reload scene shader from asset");
self.program_post_composite.reload_from_asset().expect("Failed to reload post composite shader from asset");
// // Delete old shaders | // let mut program = RefCell::borrow_mut(&program);
// program.delete();
// }
// if let Some(program) = self.program_post_resolve.take() {
// let mut program = RefCell::borrow_mut(&program);
// program.delete();
// }
// Reload shader from assets
// // Load shaders
// self.program_ehaa_scene = Some({
// let mut s = ShaderProgram::new_from_file(
// &asset_folder.join("shaders/scene_ehaa.vert.glsl"),
// &asset_folder.join("shaders/scene_ehaa.frag.glsl"),
// Some(&asset_folder.join("shaders/scene_ehaa.tesseval.glsl"))
//// None
// );
// s.compile();
// Rc::new(RefCell::new(s))
// });
// self.program_post_resolve = Some({
// let mut s = ShaderProgram::new_from_file(
// &asset_folder.join("shaders/post_resolve.vert.glsl"),
// &asset_folder.join("shaders/post_resolve.frag.glsl"),
// None
// );
// s.compile();
// Rc::new(RefCell::new(s))
// });
// Reload subsystem shaders
self.separable_sss_system.reload_shaders();
}
pub fn do_render_frame(&mut self) {
// Reload shaders if needed
if self.queued_shader_reload {
self.queued_shader_reload = false;
self.reload_shaders();
}
// Update cam state
// LATER: Do this when rendering a scene: Get active camera from scene, make CameraState, calc proj matrix, pass state along in functions
let active_camera = demo::demo_instance().get_test_camera();
let active_camera = if let Some(cam) = active_camera.upgrade() {
cam
} else {
// No active camera, so don't render anything for now
return;
};
let camera_fovy: Rad<f32>;
let camera_near_z: f32;
let camera_far_z: f32;
let cam_state = {
let cam = Mutex::lock(&active_camera).unwrap();
let mut state = RenderCameraState::new();
// Get camera fovy
// let projection: &dyn Any = cam.projection.as_ref();
// let projection: &PerspectiveProjection = projection.downcast_ref::<PerspectiveProjection>().unwrap();
camera_fovy = cam.projection.camera_fovy();
let (near_z, far_z) = cam.projection.test_depth_planes();
camera_near_z = near_z;
camera_far_z = far_z;
// Base matrix for our coordinate system
let base_matrix = Matrix4::look_at_dir(Point3 {x: 0.0, y: 0.0, z: 0.0}, vec3(0.0, 0.0, 1.0), vec3(0.0, 1.0, 0.0)); // For some reason look_at_dir inverts the dir vector
state.view_matrix = base_matrix * Matrix4::from(cam.rotation) * Matrix4::from_translation(-cam.translation);
state.projection_matrix = cam.projection.projection_matrix(cam.viewport_size);
state
};
let viewprojection_matrix = cam_state.projection_matrix * cam_state.view_matrix;
// Recompile shaders
if self.program_ehaa_scene.needs_recompile() {
self.program_ehaa_scene.do_recompile();
}
if self.program_post_composite.needs_recompile() {
self.program_post_composite.do_recompile();
}
unsafe {
gl::Disable(gl::FRAMEBUFFER_SRGB);
gl::Disable(gl::BLEND);
gl::Enable(gl::CULL_FACE);
gl::FrontFace(gl::CCW);
gl::CullFace(gl::FRONT); // For some reason we need to cull FRONT. This might be due to reverse-z flipping the winding order?
gl::Enable(gl::DEPTH_TEST);
// Setup NDC z axis for reverse float depth
gl::DepthFunc(gl::GREATER);
gl::ClearDepth(0.0); // 0.0 is far with reverse z
gl::ClipControl(gl::LOWER_LEFT, gl::ZERO_TO_ONE);
gl::DepthRange(0.0, 1.0); // Standard (non-inversed) depth range, we use a reverse-z projection matrix instead
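// Explanatory note (not from the original): with GL_ZERO_TO_ONE clip control, a reversed
// projection (near maps to 1.0, far to 0.0) and a GREATER depth test, floating-point depth
// precision is spread far more evenly across the view distance than with the standard mapping.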
// Use scene shader
let scene_shader = self.program_ehaa_scene.program().unwrap();
let scene_shader_gl = scene_shader.program_gl().unwrap();
gl::UseProgram(scene_shader_gl);
// Bind scene framebuffer
let scene_fbo = RefCell::borrow(self.framebuffer_scene_hdr_ehaa.need());
gl::BindFramebuffer(gl::FRAMEBUFFER, scene_fbo.handle_gl());
// Set the viewport
gl::Viewport(0, 0, self.current_resolution.0 as gl::sizei, self.current_resolution.1 as gl::sizei);
gl::ClearColor(0.0, 0.0, 0.0, 0.0);
gl::Clear(gl::COLOR_BUFFER_BIT | gl::DEPTH_BUFFER_BIT);
{// Upload matrices
let model_matrix = Matrix4::from_scale(1.0);
let model_matrix_arr: [[f32; 4]; 4] = model_matrix.into();
gl::UniformMatrix4fv(gl::GetUniformLocation(scene_shader_gl, "uMatrixModel\0".as_ptr() as *const gl::char), 1, gl::FALSE, model_matrix_arr.as_ptr() as *const gl::float);
let view_matrix_arr: [[f32; 4]; 4] = cam_state.view_matrix.into();
gl::UniformMatrix4fv(gl::GetUniformLocation(scene_shader_gl, "uMatrixView\0".as_ptr() as *const gl::char), 1, gl::FALSE, view_matrix_arr.as_ptr() as *const gl::float);
let viewprojection_matrix_arr: [[f32; 4]; 4] = viewprojection_matrix.into();
gl::UniformMatrix4fv(gl::GetUniformLocation(scene_shader_gl, "uMatrixViewProjection\0".as_ptr() as *const gl::char), 1, gl::FALSE, viewprojection_matrix_arr.as_ptr() as *const gl::float);
}
let start_frametimer = {// Start frametime timer
let mut elapsed_frametime: u64 = std::u64::MAX;
gl::GetQueryObjectui64v(self.frametime_query_object_gl, gl::QUERY_RESULT_NO_WAIT, &mut elapsed_frametime);
if elapsed_frametime != std::u64::MAX {
let _float_frametime = (elapsed_frametime as f64) / 1e6;
// let title = format!("EHAA Demo ~ Frametime {} ms", float_frametime);
// self.window.need_mut().set_title(title.as_str());
// Restart query
gl::BeginQuery(gl::TIME_ELAPSED, self.frametime_query_object_gl);
true
}
else {
false
}
};
// Set tessellation state
gl::PatchParameteri(gl::PATCH_VERTICES, 3);
gl::PatchParameterfv(gl::PATCH_DEFAULT_OUTER_LEVEL, [1.0f32, 1.0f32, 1.0f32, 1.0f32].as_ptr());
gl::PatchParameterfv(gl::PATCH | // if let Some(program) = self.program_ehaa_scene.take() { | random_line_split |
render_global.rs |
else {
// Create fbo
self.framebuffer_scene_hdr_ehaa = Some(Rc::new(RefCell::new({
let mut fbo = Framebuffer::new(event.resolution.0, event.resolution.1);
fbo.add_attachment(FramebufferAttachment::from_new_texture(AttachmentPoint::Depth, ImageFormat::get(gl::DEPTH_COMPONENT32F)));
fbo.add_attachment(FramebufferAttachment::from_new_texture(AttachmentPoint::Color(0), ImageFormat::get(gl::R11F_G11F_B10F)));
fbo.add_attachment(FramebufferAttachment::from_new_texture(AttachmentPoint::Color(1), ImageFormat::get(gl::RGB8)));
fbo.add_attachment(FramebufferAttachment::from_new_texture(AttachmentPoint::Color(2), ImageFormat::get(gl::RGB8)));
// fbo.add_attachment(FramebufferAttachment::from_new_texture(AttachmentPoint::Color(1), ImageFormat::get(gl::RGBA8)));
// fbo.add_attachment(FramebufferAttachment::from_new_texture(AttachmentPoint::Color(1), ImageFormat::get(gl::RG16_SNORM)));
fbo.allocate();
fbo
})));
}
// Reconfigure subsystems
self.separable_sss_system.reconfigure(event);
// Drop config for now
drop(config);
// Create query object
if self.frametime_query_object_gl == 0 {
self.frametime_query_object_gl = unsafe {
let mut query: gl::uint = 0;
gl::CreateQueries(gl::TIME_ELAPSED, 1, &mut query);
query
};
}
// Load shaders
self.reload_shaders();
Ok(())
}
fn reload_shaders(&mut self) {
// let asset_folder = demo::demo_instance().asset_folder.as_mut().unwrap();
// Log
println!("Reloading shaders!");
// Reload shaders from asset
self.program_ehaa_scene.reload_from_asset().expect("Failed to reload scene shader from asset");
self.program_post_composite.reload_from_asset().expect("Failed to reload post composite shader from asset");
// // Delete old shaders
// if let Some(program) = self.program_ehaa_scene.take() {
// let mut program = RefCell::borrow_mut(&program);
// program.delete();
// }
// if let Some(program) = self.program_post_resolve.take() {
// let mut program = RefCell::borrow_mut(&program);
// program.delete();
// }
// Reload shader from assets
// // Load shaders
// self.program_ehaa_scene = Some({
// let mut s = ShaderProgram::new_from_file(
// &asset_folder.join("shaders/scene_ehaa.vert.glsl"),
// &asset_folder.join("shaders/scene_ehaa.frag.glsl"),
// Some(&asset_folder.join("shaders/scene_ehaa.tesseval.glsl"))
//// None
// );
// s.compile();
// Rc::new(RefCell::new(s))
// });
// self.program_post_resolve = Some({
// let mut s = ShaderProgram::new_from_file(
// &asset_folder.join("shaders/post_resolve.vert.glsl"),
// &asset_folder.join("shaders/post_resolve.frag.glsl"),
// None
// );
// s.compile();
// Rc::new(RefCell::new(s))
// });
// Reload subsystem shaders
self.separable_sss_system.reload_shaders();
}
pub fn do_render_frame(&mut self) {
// Reload shaders if needed
if self.queued_shader_reload {
self.queued_shader_reload = false;
self.reload_shaders();
}
// Update cam state
// LATER: Do this when rendering a scene: Get active camera from scene, make CameraState, calc proj matrix, pass state along in functions
let active_camera = demo::demo_instance().get_test_camera();
let active_camera = if let Some(cam) = active_camera.upgrade() {
cam
} else {
// No active camera, so don't render anything for now
return;
};
let camera_fovy: Rad<f32>;
let camera_near_z: f32;
let camera_far_z: f32;
let cam_state = {
let cam = Mutex::lock(&active_camera).unwrap();
let mut state = RenderCameraState::new();
// Get camera fovy
// let projection: &dyn Any = cam.projection.as_ref();
// let projection: &PerspectiveProjection = projection.downcast_ref::<PerspectiveProjection>().unwrap();
camera_fovy = cam.projection.camera_fovy();
let (near_z, far_z) = cam.projection.test_depth_planes();
camera_near_z = near_z;
camera_far_z = far_z;
// Base matrix for our coordinate system
let base_matrix = Matrix4::look_at_dir(Point3 {x: 0.0, y: 0.0, z: 0.0}, vec3(0.0, 0.0, 1.0), vec3(0.0, 1.0, 0.0)); // For some reason look_at_dir inverts the dir vector
state.view_matrix = base_matrix * Matrix4::from(cam.rotation) * Matrix4::from_translation(-cam.translation);
state.projection_matrix = cam.projection.projection_matrix(cam.viewport_size);
state
};
let viewprojection_matrix = cam_state.projection_matrix * cam_state.view_matrix;
// Recompile shaders
if self.program_ehaa_scene.needs_recompile() {
self.program_ehaa_scene.do_recompile();
}
if self.program_post_composite.needs_recompile() {
self.program_post_composite.do_recompile();
}
unsafe {
gl::Disable(gl::FRAMEBUFFER_SRGB);
gl::Disable(gl::BLEND);
gl::Enable(gl::CULL_FACE);
gl::FrontFace(gl::CCW);
gl::CullFace(gl::FRONT); // For some reason we need to cull FRONT. This might be due to reverse-z flipping the winding order?
gl::Enable(gl::DEPTH_TEST);
// Setup NDC z axis for reverse float depth
gl::DepthFunc(gl::GREATER);
gl::ClearDepth(0.0); // 0.0 is far with reverse z
gl::ClipControl(gl::LOWER_LEFT, gl::ZERO_TO_ONE);
gl::DepthRange(0.0, 1.0); // Standard (non-inversed) depth range, we use a reverse-z projection matrix instead
// Use scene shader
let scene_shader = self.program_ehaa_scene.program().unwrap();
let scene_shader_gl = scene_shader.program_gl().unwrap();
gl::UseProgram(scene_shader_gl);
// Bind scene framebuffer
let scene_fbo = RefCell::borrow(self.framebuffer_scene_hdr_ehaa.need());
gl::BindFramebuffer(gl::FRAMEBUFFER, scene_fbo.handle_gl());
// Set the viewport
gl::Viewport(0, 0, self.current_resolution.0 as gl::sizei, self.current_resolution.1 as gl::sizei);
gl::ClearColor(0.0, 0.0, 0.0, 0.0);
gl::Clear(gl::COLOR_BUFFER_BIT | gl::DEPTH_BUFFER_BIT);
{// Upload matrices
let model_matrix = Matrix4::from_scale(1.0);
let model_matrix_arr: [[f32; 4]; 4] = model_matrix.into();
gl::UniformMatrix4fv(gl::GetUniformLocation(scene_shader_gl, "uMatrixModel\0".as_ptr() as *const gl::char), 1, gl::FALSE, model_matrix_arr.as_ptr() as *const gl::float);
let view_matrix_arr: [[f32; 4]; 4] = cam_state.view_matrix.into();
gl::UniformMatrix4fv(gl::GetUniformLocation(scene_shader_gl, "uMatrixView\0".as_ptr() as *const gl::char), 1, gl::FALSE, view_matrix_arr.as_ptr() as *const gl::float);
let viewprojection_matrix_arr: [[f32; 4]; 4] = viewprojection_matrix.into();
gl::UniformMatrix4fv(gl::GetUniformLocation(scene_shader_gl, "uMatrixViewProjection\0".as_ptr() as *const gl::char), 1, gl::FALSE, viewprojection_matrix_arr.as_ptr() as *const gl::float);
}
let start_frametimer = {// Start frametime timer
let mut elapsed_frametime: u64 = std::u64::MAX;
gl::GetQueryObjectui64v(self.frametime_query_object_gl, gl::QUERY_RESULT_NO_WAIT, &mut elapsed_frametime);
if elapsed_frametime != std::u64::MAX | {
let mut fbo = RefCell::borrow_mut(t);
fbo.resize(event.resolution.0, event.resolution.1);
} | conditional_block |
|
render_global.rs | FramebufferAttachment::from_new_texture(AttachmentPoint::Color(2), ImageFormat::get(gl::RGB8)));
// fbo.add_attachment(FramebufferAttachment::from_new_texture(AttachmentPoint::Color(1), ImageFormat::get(gl::RGBA8)));
// fbo.add_attachment(FramebufferAttachment::from_new_texture(AttachmentPoint::Color(1), ImageFormat::get(gl::RG16_SNORM)));
fbo.allocate();
fbo
})));
}
// Reconfigure subsystems
self.separable_sss_system.reconfigure(event);
// Drop config for now
drop(config);
// Create query object
if self.frametime_query_object_gl == 0 {
self.frametime_query_object_gl = unsafe {
let mut query: gl::uint = 0;
gl::CreateQueries(gl::TIME_ELAPSED, 1, &mut query);
query
};
}
// Load shaders
self.reload_shaders();
Ok(())
}
fn | (&mut self) {
// let asset_folder = demo::demo_instance().asset_folder.as_mut().unwrap();
// Log
println!("Reloading shaders!");
// Reload shaders from asset
self.program_ehaa_scene.reload_from_asset().expect("Failed to reload scene shader from asset");
self.program_post_composite.reload_from_asset().expect("Failed to reload post composite shader from asset");
// // Delete old shaders
// if let Some(program) = self.program_ehaa_scene.take() {
// let mut program = RefCell::borrow_mut(&program);
// program.delete();
// }
// if let Some(program) = self.program_post_resolve.take() {
// let mut program = RefCell::borrow_mut(&program);
// program.delete();
// }
// Reload shader from assets
// // Load shaders
// self.program_ehaa_scene = Some({
// let mut s = ShaderProgram::new_from_file(
// &asset_folder.join("shaders/scene_ehaa.vert.glsl"),
// &asset_folder.join("shaders/scene_ehaa.frag.glsl"),
// Some(&asset_folder.join("shaders/scene_ehaa.tesseval.glsl"))
//// None
// );
// s.compile();
// Rc::new(RefCell::new(s))
// });
// self.program_post_resolve = Some({
// let mut s = ShaderProgram::new_from_file(
// &asset_folder.join("shaders/post_resolve.vert.glsl"),
// &asset_folder.join("shaders/post_resolve.frag.glsl"),
// None
// );
// s.compile();
// Rc::new(RefCell::new(s))
// });
// Reload subsystem shaders
self.separable_sss_system.reload_shaders();
}
pub fn do_render_frame(&mut self) {
// Reload shaders if needed
if self.queued_shader_reload {
self.queued_shader_reload = false;
self.reload_shaders();
}
// Update cam state
// LATER: Do this when rendering a scene: Get active camera from scene, make CameraState, calc proj matrix, pass state along in functions
let active_camera = demo::demo_instance().get_test_camera();
let active_camera = if let Some(cam) = active_camera.upgrade() {
cam
} else {
// No active camera, so don't render anything for now
return;
};
let camera_fovy: Rad<f32>;
let camera_near_z: f32;
let camera_far_z: f32;
let cam_state = {
let cam = Mutex::lock(&active_camera).unwrap();
let mut state = RenderCameraState::new();
// Get camera fovy
// let projection: &dyn Any = cam.projection.as_ref();
// let projection: &PerspectiveProjection = projection.downcast_ref::<PerspectiveProjection>().unwrap();
camera_fovy = cam.projection.camera_fovy();
let (near_z, far_z) = cam.projection.test_depth_planes();
camera_near_z = near_z;
camera_far_z = far_z;
// Base matrix for our coordinate system
let base_matrix = Matrix4::look_at_dir(Point3 {x: 0.0, y: 0.0, z: 0.0}, vec3(0.0, 0.0, 1.0), vec3(0.0, 1.0, 0.0)); // For some reason look_at_dir inverts the dir vector
state.view_matrix = base_matrix * Matrix4::from(cam.rotation) * Matrix4::from_translation(-cam.translation);
state.projection_matrix = cam.projection.projection_matrix(cam.viewport_size);
state
};
let viewprojection_matrix = cam_state.projection_matrix * cam_state.view_matrix;
// Recompile shaders
if self.program_ehaa_scene.needs_recompile() {
self.program_ehaa_scene.do_recompile();
}
if self.program_post_composite.needs_recompile() {
self.program_post_composite.do_recompile();
}
unsafe {
gl::Disable(gl::FRAMEBUFFER_SRGB);
gl::Disable(gl::BLEND);
gl::Enable(gl::CULL_FACE);
gl::FrontFace(gl::CCW);
gl::CullFace(gl::FRONT); // For some reason we need to cull FRONT. This might be due to reverse-z flipping the winding order?
gl::Enable(gl::DEPTH_TEST);
// Setup NDC z axis for reverse float depth
gl::DepthFunc(gl::GREATER);
gl::ClearDepth(0.0); // 0.0 is far with reverse z
gl::ClipControl(gl::LOWER_LEFT, gl::ZERO_TO_ONE);
gl::DepthRange(0.0, 1.0); // Standard (non-inversed) depth range, we use a reverse-z projection matrix instead
// Use scene shader
let scene_shader = self.program_ehaa_scene.program().unwrap();
let scene_shader_gl = scene_shader.program_gl().unwrap();
gl::UseProgram(scene_shader_gl);
// Bind scene framebuffer
let scene_fbo = RefCell::borrow(self.framebuffer_scene_hdr_ehaa.need());
gl::BindFramebuffer(gl::FRAMEBUFFER, scene_fbo.handle_gl());
// Set the viewport
gl::Viewport(0, 0, self.current_resolution.0 as gl::sizei, self.current_resolution.1 as gl::sizei);
gl::ClearColor(0.0, 0.0, 0.0, 0.0);
gl::Clear(gl::COLOR_BUFFER_BIT | gl::DEPTH_BUFFER_BIT);
{// Upload matrices
let model_matrix = Matrix4::from_scale(1.0);
let model_matrix_arr: [[f32; 4]; 4] = model_matrix.into();
gl::UniformMatrix4fv(gl::GetUniformLocation(scene_shader_gl, "uMatrixModel\0".as_ptr() as *const gl::char), 1, gl::FALSE, model_matrix_arr.as_ptr() as *const gl::float);
let view_matrix_arr: [[f32; 4]; 4] = cam_state.view_matrix.into();
gl::UniformMatrix4fv(gl::GetUniformLocation(scene_shader_gl, "uMatrixView\0".as_ptr() as *const gl::char), 1, gl::FALSE, view_matrix_arr.as_ptr() as *const gl::float);
let viewprojection_matrix_arr: [[f32; 4]; 4] = viewprojection_matrix.into();
gl::UniformMatrix4fv(gl::GetUniformLocation(scene_shader_gl, "uMatrixViewProjection\0".as_ptr() as *const gl::char), 1, gl::FALSE, viewprojection_matrix_arr.as_ptr() as *const gl::float);
}
let start_frametimer = {// Start frametime timer
let mut elapsed_frametime: u64 = std::u64::MAX;
gl::GetQueryObjectui64v(self.frametime_query_object_gl, gl::QUERY_RESULT_NO_WAIT, &mut elapsed_frametime);
if elapsed_frametime != std::u64::MAX {
let _float_frametime = (elapsed_frametime as f64) / 1e6;
// let title = format!("EHAA Demo ~ Frametime {} ms", float_frametime);
// self.window.need_mut().set_title(title.as_str());
// Restart query
gl::BeginQuery(gl::TIME_ELAPSED, self.frametime_query_object_gl);
true
}
else {
false
}
};
// Set tessellation state
gl::PatchParameteri(gl::PATCH_VERTICES, 3);
gl::PatchParameterfv(gl::PATCH_DEFAULT_OUTER_LEVEL, [1.0f32, 1.0f32, 1.0f32, 1.0f32].as_ptr());
gl::PatchParameterfv(gl | reload_shaders | identifier_name |
spinup.py | type=int, help="Writing interval for spatial time series", default=10)
parser.add_argument(
"-f",
"--o_format",
dest="oformat",
choices=["netcdf3", "netcdf4_parallel", "pnetcdf"],
help="output format",
default="netcdf3",
)
parser.add_argument(
"-g", "--grid", dest="grid", type=int, choices=grid_choices, help="horizontal grid resolution", default=2000
)
parser.add_argument("--o_dir", dest="odir", help="output directory. Default: current directory", default="foo")
parser.add_argument(
"--o_size", dest="osize", choices=["small", "medium", "big", "big_2d"], help="output size type", default="medium"
)
parser.add_argument(
"-s",
"--system",
dest="system",
choices=list_systems(),
help="computer system to use.",
default="pleiades_broadwell",
)
parser.add_argument(
"--calving",
dest="calving",
choices=["float_kill", "ocean_kill", "eigen_calving", "thickness_calving", "vonmises_calving", "hybrid_calving"],
help="calving mechanism",
default="vonmises_calving",
)
parser.add_argument(
"--frontal_melt", dest="frontal_melt", action="store_true", help="Turn on frontal melt", default=False
)
parser.add_argument(
"--forcing_type", dest="forcing_type", choices=["ctrl", "e_age"], help="output size type", default="ctrl"
)
parser.add_argument(
"--hydrology",
dest="hydrology",
choices=["null", "diffuse", "routing"],
help="Basal hydrology model.",
default="diffuse",
)
parser.add_argument(
"-p", "--params", dest="params_list", help="Comma-separated list with params for sensitivity", default=None
)
parser.add_argument(
"--stable_gl",
dest="float_kill_calve_near_grounding_line",
action="store_true",
help="Stable grounding line",
default=False,
)
parser.add_argument(
"--stress_balance",
dest="stress_balance",
choices=["sia", "ssa+sia", "ssa"],
help="stress balance solver",
default="ssa+sia",
)
parser.add_argument(
"--vertical_velocity_approximation",
dest="vertical_velocity_approximation",
choices=["centered", "upstream"],
help="How to approximate vertical velocities",
default="upstream",
)
parser.add_argument("--start_year", dest="start_year", type=int, help="Simulation start year", default=0)
parser.add_argument("--end_year", dest="end_year", type=int, help="Simulation end year", default=10000)
options = parser.parse_args()
nn = options.n
odir = options.odir
oformat = options.oformat
osize = options.osize
queue = options.queue
walltime = options.walltime
system = options.system
calving = options.calving
climate = "elevation"
exstep = options.exstep
float_kill_calve_near_grounding_line = options.float_kill_calve_near_grounding_line
forcing_type = options.forcing_type
frontal_melt = options.frontal_melt
grid = options.grid
hydrology = options.hydrology
ocean = "const"
stress_balance = options.stress_balance
vertical_velocity_approximation = options.vertical_velocity_approximation
# Check which parameters are used for sensitivity study
params_list = options.params_list
do_T_max = False
do_eigen_calving_k = False
do_fice = False
do_fsnow = False
if params_list is not None:
params = params_list.split(",")
if "T_max" in params:
do_T_max = True
if "eigen_calving_k" in params:
do_eigen_calving_k = True
if "fice" in params:
do_fice = True
if "fsnow" in params:
do_fsnow = True
domain = options.domain
pism_exec = generate_domain(domain)
pism_dataname = "pism_outletglacier_g{}m.nc".format(grid)
pism_config = "init_config"
pism_config_nc = ".".join([pism_config, "nc"])
pism_config_cdl = os.path.join("../config", ".".join([pism_config, "cdl"]))
# Anaconda libssl problem on chinook
if system in ("chinook"):
ncgen = "/usr/bin/ncgen"
else:
|
cmd = [ncgen, "-o", pism_config_nc, pism_config_cdl]
sub.call(cmd)
if not os.path.isdir(odir):
os.mkdir(odir)
state_dir = "state"
scalar_dir = "scalar"
spatial_dir = "spatial"
for tsdir in (scalar_dir, spatial_dir, state_dir):
if not os.path.isdir(os.path.join(odir, tsdir)):
os.mkdir(os.path.join(odir, tsdir))
odir_tmp = "_".join([odir, "tmp"])
if not os.path.isdir(odir_tmp):
os.mkdir(odir_tmp)
# ########################################################
# set up model initialization
# ########################################################
ssa_e = 1.0
ssa_n_values = [3.25]
sia_e_values = [3]
ppq_values = [0.6]
tefo_values = [0.020]
phi_min_values = [15.0]
phi_max_values = [45.0]
topg_min_values = [-700]
topg_max_values = [1000]
combinations = list(
itertools.product(
sia_e_values,
ssa_n_values,
ppq_values,
tefo_values,
phi_min_values,
phi_max_values,
topg_min_values,
topg_max_values,
)
)
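# Each element of `combinations` is one (sia_e, ssa_n, ppq, tefo, phi_min, phi_max, topg_min, topg_max)
# tuple; with the single-value lists above this Cartesian product reduces to a single run.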
tsstep = "yearly"
scripts = []
scripts_post = []
simulation_start_year = options.start_year
simulation_end_year = options.end_year
for n, combination in enumerate(combinations):
sia_e, ssa_n, ppq, tefo, phi_min, phi_max, topg_min, topg_max = combination
ttphi = "{},{},{},{}".format(phi_min, phi_max, topg_min, topg_max)
name_options = OrderedDict()
name_options["sia_e"] = sia_e
name_options["ssa_n"] = ssa_n
name_options["ppq"] = ppq
name_options["tefo"] = tefo
# name_options['phi_min'] = phi_min
# name_options['phi_max'] = phi_max
# name_options['topg_min'] = topg_min
# name_options['topg_max'] = topg_max
name_options["calving"] = calving
full_exp_name = "_".join(["_".join(["_".join([k, str(v)]) for k, v in list(name_options.items())])])
full_outfile = "{domain}_g{grid}m_{experiment}.nc".format(
domain=domain.lower(), grid=grid, experiment=full_exp_name
)
experiment = "_".join(
[
climate,
"_".join(["_".join([k, str(v)]) for k, v in list(name_options.items())]),
"{}".format(simulation_start_year),
"{}".format(simulation_end_year),
]
)
# All runs in one script file for coarse grids that fit into max walltime
script = "init_{}_g{}m_{}.sh".format(domain.lower(), grid, full_exp_name)
scripts.append(script)
script_post = "init_{}_g{}m_{}_post.sh".format(domain.lower(), grid, full_exp_name)
scripts_post.append(script_post)
for filename in (script, script_post):
try:
os.remove(filename)
except OSError:
pass
batch_header, batch_system = make_batch_header(system, nn, walltime, queue)
with open(script, "w") as f:
f.write(batch_header)
outfile = "{domain}_g{grid}m_{experiment}.nc".format(domain=domain.lower(), grid=grid, experiment=experiment)
prefix = generate_prefix_str(pism_exec)
general_params_dict = OrderedDict()
general_params_dict["bootstrap"] = ""
general_params_dict["i"] = pism_dataname
general_params_dict["ys"] = simulation_start_year
general_params_dict["ye"] = simulation_end_year
general_params_dict["o"] = os.path.join(odir, state_dir, outfile)
general_params_dict["o_format"] = oformat
general_params_dict["o_size"] = osize
general_params_dict["config_override"] = pism_config_nc
grid_params_dict = generate_grid_description(grid, domain)
sb_params_dict = OrderedDict()
sb_params_dict["sia_e"] = sia_e
sb_params_dict["ssa_e"] = ssa_e
sb_params_dict["ssa_n"] = ssa_n
sb_params_dict["ssa_dirichlet_bc"] = ""
sb_params_dict["pseudo_plastic_q"] = ppq
sb_params_dict["till_effective_fraction_overburden"] = tefo
sb_params_dict["topg_to_phi"] = ttphi
sb_params_dict["vertical_velocity_approximation"] = vertical_velocity_approximation
| ncgen = "ncgen" | conditional_block |
spinup.py | ", type=int, help="Writing interval for spatial time series", default=10)
parser.add_argument(
"-f",
"--o_format",
dest="oformat",
choices=["netcdf3", "netcdf4_parallel", "pnetcdf"],
help="output format",
default="netcdf3",
)
parser.add_argument(
"-g", "--grid", dest="grid", type=int, choices=grid_choices, help="horizontal grid resolution", default=2000
)
parser.add_argument("--o_dir", dest="odir", help="output directory. Default: current directory", default="foo")
parser.add_argument(
"--o_size", dest="osize", choices=["small", "medium", "big", "big_2d"], help="output size type", default="medium"
)
parser.add_argument(
"-s",
"--system",
dest="system",
choices=list_systems(),
help="computer system to use.",
default="pleiades_broadwell",
)
parser.add_argument(
"--calving",
dest="calving",
choices=["float_kill", "ocean_kill", "eigen_calving", "thickness_calving", "vonmises_calving", "hybrid_calving"],
help="calving mechanism",
default="vonmises_calving",
)
parser.add_argument(
"--frontal_melt", dest="frontal_melt", action="store_true", help="Turn on frontal melt", default=False
)
parser.add_argument(
"--forcing_type", dest="forcing_type", choices=["ctrl", "e_age"], help="output size type", default="ctrl"
)
parser.add_argument(
"--hydrology",
dest="hydrology",
choices=["null", "diffuse", "routing"],
help="Basal hydrology model.",
default="diffuse",
)
parser.add_argument(
"-p", "--params", dest="params_list", help="Comma-separated list with params for sensitivity", default=None
)
parser.add_argument(
"--stable_gl",
dest="float_kill_calve_near_grounding_line",
action="store_true",
help="Stable grounding line",
default=False,
)
parser.add_argument(
"--stress_balance",
dest="stress_balance",
choices=["sia", "ssa+sia", "ssa"],
help="stress balance solver",
default="ssa+sia",
)
parser.add_argument(
"--vertical_velocity_approximation",
dest="vertical_velocity_approximation",
choices=["centered", "upstream"],
help="How to approximate vertical velocities",
default="upstream",
)
parser.add_argument("--start_year", dest="start_year", type=int, help="Simulation start year", default=0)
parser.add_argument("--end_year", dest="end_year", type=int, help="Simulation end year", default=10000)
options = parser.parse_args()
nn = options.n
odir = options.odir
oformat = options.oformat
osize = options.osize
queue = options.queue
walltime = options.walltime
system = options.system
calving = options.calving
climate = "elevation"
exstep = options.exstep
float_kill_calve_near_grounding_line = options.float_kill_calve_near_grounding_line
forcing_type = options.forcing_type
frontal_melt = options.frontal_melt
grid = options.grid
hydrology = options.hydrology
ocean = "const"
stress_balance = options.stress_balance
vertical_velocity_approximation = options.vertical_velocity_approximation
# Check which parameters are used for sensitivity study
params_list = options.params_list
do_T_max = False
do_eigen_calving_k = False
do_fice = False
do_fsnow = False
if params_list is not None:
params = params_list.split(",")
if "T_max" in params:
do_T_max = True
if "eigen_calving_k" in params:
do_eigen_calving_k = True
if "fice" in params:
do_fice = True
if "fsnow" in params:
do_fsnow = True
domain = options.domain
pism_exec = generate_domain(domain)
pism_dataname = "pism_outletglacier_g{}m.nc".format(grid)
pism_config = "init_config"
pism_config_nc = ".".join([pism_config, "nc"])
pism_config_cdl = os.path.join("../config", ".".join([pism_config, "cdl"]))
# Anaconda libssl problem on chinook
if system in ("chinook"):
ncgen = "/usr/bin/ncgen"
else:
ncgen = "ncgen"
cmd = [ncgen, "-o", pism_config_nc, pism_config_cdl]
sub.call(cmd)
if not os.path.isdir(odir):
os.mkdir(odir)
state_dir = "state"
scalar_dir = "scalar"
spatial_dir = "spatial"
for tsdir in (scalar_dir, spatial_dir, state_dir):
if not os.path.isdir(os.path.join(odir, tsdir)):
os.mkdir(os.path.join(odir, tsdir))
odir_tmp = "_".join([odir, "tmp"])
if not os.path.isdir(odir_tmp):
os.mkdir(odir_tmp)
# ########################################################
# set up model initialization
# ########################################################
ssa_e = 1.0
ssa_n_values = [3.25]
sia_e_values = [3]
ppq_values = [0.6]
tefo_values = [0.020]
phi_min_values = [15.0]
phi_max_values = [45.0]
topg_min_values = [-700]
topg_max_values = [1000]
combinations = list( | ssa_n_values,
ppq_values,
tefo_values,
phi_min_values,
phi_max_values,
topg_min_values,
topg_max_values,
)
)
tsstep = "yearly"
scripts = []
scripts_post = []
simulation_start_year = options.start_year
simulation_end_year = options.end_year
for n, combination in enumerate(combinations):
sia_e, ssa_n, ppq, tefo, phi_min, phi_max, topg_min, topg_max = combination
ttphi = "{},{},{},{}".format(phi_min, phi_max, topg_min, topg_max)
name_options = OrderedDict()
name_options["sia_e"] = sia_e
name_options["ssa_n"] = ssa_n
name_options["ppq"] = ppq
name_options["tefo"] = tefo
# name_options['phi_min'] = phi_min
# name_options['phi_max'] = phi_max
# name_options['topg_min'] = topg_min
# name_options['topg_max'] = topg_max
name_options["calving"] = calving
full_exp_name = "_".join(["_".join(["_".join([k, str(v)]) for k, v in list(name_options.items())])])
full_outfile = "{domain}_g{grid}m_{experiment}.nc".format(
domain=domain.lower(), grid=grid, experiment=full_exp_name
)
experiment = "_".join(
[
climate,
"_".join(["_".join([k, str(v)]) for k, v in list(name_options.items())]),
"{}".format(simulation_start_year),
"{}".format(simulation_end_year),
]
)
# All runs in one script file for coarse grids that fit into max walltime
script = "init_{}_g{}m_{}.sh".format(domain.lower(), grid, full_exp_name)
scripts.append(script)
script_post = "init_{}_g{}m_{}_post.sh".format(domain.lower(), grid, full_exp_name)
scripts_post.append(script_post)
for filename in (script, script_post):
try:
os.remove(filename)
except OSError:
pass
batch_header, batch_system = make_batch_header(system, nn, walltime, queue)
with open(script, "w") as f:
f.write(batch_header)
outfile = "{domain}_g{grid}m_{experiment}.nc".format(domain=domain.lower(), grid=grid, experiment=experiment)
prefix = generate_prefix_str(pism_exec)
general_params_dict = OrderedDict()
general_params_dict["bootstrap"] = ""
general_params_dict["i"] = pism_dataname
general_params_dict["ys"] = simulation_start_year
general_params_dict["ye"] = simulation_end_year
general_params_dict["o"] = os.path.join(odir, state_dir, outfile)
general_params_dict["o_format"] = oformat
general_params_dict["o_size"] = osize
general_params_dict["config_override"] = pism_config_nc
grid_params_dict = generate_grid_description(grid, domain)
sb_params_dict = OrderedDict()
sb_params_dict["sia_e"] = sia_e
sb_params_dict["ssa_e"] = ssa_e
sb_params_dict["ssa_n"] = ssa_n
sb_params_dict["ssa_dirichlet_bc"] = ""
sb_params_dict["pseudo_plastic_q"] = ppq
sb_params_dict["till_effective_fraction_overburden"] = tefo
sb_params_dict["topg_to_phi"] = ttphi
sb_params_dict["vertical_velocity_approximation"] = vertical_velocity_approximation
polynom.py
def __hash__(self):
return hash((self.v, self.n))
def __bool__(self):
return bool(self.v)
def __add__(a, b):
assert isinstance(b, ModInt)
assert a.n == b.n
return ModInt(a.v + b.v, a.n)
def __radd__(a, b):
assert isinstance(b, int)
return ModInt(a.v + b, a.n)
def __neg__(a): return ModInt(-a.v, a.n)
def __sub__(a, b): return ModInt(a.v - b.v, a.n)
def __mul__(a, b):
if isinstance(b, int):
return ModInt(b * a.v, a.n)
elif isinstance(b, ModInt):
assert a.n == b.n
return ModInt(a.v * b.v, a.n)
return NotImplemented
def __rmul__(a, b):
return a * b
def __pow__(P, k):
assert isinstance(k, int)
V = 1
A = P
while k:
if k & 1:
V *= A
k >>= 1
if not k:
break
A *= A
return V
def inv(self):
if self.v == 0:
raise ZeroDivisionError
return ModInt(ModInt._inv(self.v, self.n), self.n)
@staticmethod
def _inv(k, n):
k %= n
if k == 1:
return k
return (n - n // k) * ModInt._inv(n % k, n) % n
def __truediv__(a, b):
assert isinstance(b, ModInt)
assert a.n == b.n
return a * b.inv()
def __rtruediv__(a, k):
assert isinstance(k, int)
return ModInt(k, a.n) / a
@staticmethod
def extended_euclid(a, b):
"""Extended Euclid algorithm
Return
------
x : int
y : int
a * x + b * y = gcd(a, b)
"""
A, B = a, b
sa, sb = (1 if a >= 0 else -1), (1 if b >= 0 else -1)
xp, yp = 1, 0
x, y = 0, 1
while b:
assert A * xp + B * yp == a
assert A * x + B * y == b
r = a // b
a, b = b, a % b
x, xp = xp - r * x, x
y, yp = yp - r * y, y
return sa * xp, sb * yp
def __repr__(self):
return '%s(%s, %s)' % (self.__class__.__name__, self.v, self.n)
def __str__(self):
return '%s' % self.v
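# --- illustrative sketch (added; not part of the original file) ---
# Quick sanity checks for ModInt arithmetic: modular inverse, division and
# exponentiation modulo the prime 7. The values are arbitrary.
if __name__ == "__main__":
    _x = ModInt(3, 7)
    assert (_x * _x.inv()).v == 1   # 3 * 5 ≡ 1 (mod 7)
    assert (1 / _x).v == 5          # __rtruediv__ delegates to inv()
    assert (_x ** 6).v == 1         # Fermat: 3^6 ≡ 1 (mod 7)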
class Polynomial:
"""
Generic class for polynomials
Works with int, float and ModInt
"""
def __len__(self):
return len(self.C)
def trim(C):
i = len(C) - 1
while i >= 0 and not C[i]:
i -= 1
return C[:i + 1]
def __init__(self, C=None):
if C is None:
C = []
self.C = Polynomial.trim(C)
@property
def deg(self):
return len(self.C) - 1
def prime(self): return Polynomial([i * self[i]
for i in range(1, len(self))])
def eval(self, x):
if not self:
return 0
v = self[-1]
for c in self[-2::-1]:
v = v * x + c
return v
def shift(self, d): return Polynomial(
[0 * self[0]] * d + self.C if self else [])
def __eq__(P, Q):
return P.deg == Q.deg and all(cP == cQ for cP, cQ in zip(P, Q))
def __hash__(self):
return hash(tuple(self.C))
def __call__(self, x): return Polynomial.eval(self, x)
def __getitem__(self, x): return self.C[x]
def __neg__(P): return Polynomial([-c for c in P.C])
def __add__(P, Q):
if len(P.C) < len(Q.C):
P, Q = Q, P
return Polynomial([P[d] + Q[d] for d in range(len(Q))] + P[len(Q):])
def __sub__(P, Q): return P + (-Q)
def _mulpoly(P, Q):
assert isinstance(Q, Polynomial)
return Polynomial([sum(P[k] * Q[d - k]
for k in range(max(0, d + 1 - len(Q)),
min(d + 1, len(P)))
) for d in range(len(P) + len(Q) - 1)])
def _mulscal(P, k):
return Polynomial([k * c for c in P])
def __mul__(P, Q):
if isinstance(Q, Polynomial):
return P._mulpoly(Q)
return P._mulscal(Q)
def __rmul__(P, Q):
return P * Q
def __pow__(P, k):
assert isinstance(k, int)
V = 1
A = P
while k:
if k & 1:
V *= A
k >>= 1
if not k:
break
A *= A
return V
def __iter__(self):
yield from self.C
def euclidean_division(A, B):
Q = [0 * B[0]] * max(0, len(A) - len(B) + 1)
while len(A.C) >= len(B.C):
Q[len(A.C) - len(B.C)] = A[-1] / B[-1]
A -= B.shift(len(A) - len(B)) * (A[-1] / B[-1])
return Polynomial(Q), A
def __floordiv__(A, B):
assert isinstance(B, Polynomial)
return A.euclidean_division(B)[0]
def __mod__(A, B):
"""
Polynomial Euclidean division
or modular reduction
"""
if isinstance(B, Polynomial):
return A.euclidean_division(B)[1]
else:
assert isinstance(B, int)
assert all(isinstance(c, int) for c in A)
return A.reduceP(B)
def __lt__(A, B): return A.deg < B.deg
def __bool__(self): return bool(self.C)
def gcd(A, B):
while B:
A, B = B, A % B
return A * (1 / A[-1])
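# Illustrative worked example (added; not in the original file), with
# coefficients listed from lowest to highest degree:
#   A = Polynomial([-1, 0, 1])           # X^2 - 1
#   B = Polynomial([-1, 1])              # X - 1
#   A // B == Polynomial([1, 1])         # quotient X + 1
#   A % B  == Polynomial([])             # zero remainder
#   Polynomial.gcd(A, B) == Polynomial([-1, 1])   # monic gcd X - 1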
@staticmethod
def gaussianElimKer(M, zero, one):
"""
Outputs an element of the kernel of M
zero and one are elements of the same field
"""
# V satisfies the invariant
# M = V M_0
V = [Polynomial([zero] * i + [one]) for i in range(len(M))]
pivots = [None] * (len(M) + 1)
for l in range(len(M)):
while M[l].deg >= 0:
idp = M[l].deg
if pivots[idp] is None:
pivots[idp] = l
break
else:
c = M[l][idp] / M[pivots[idp]][idp]
M[l] -= c * M[pivots[idp]]
V[l] -= c * V[pivots[idp]]
else:
# If a line is null, we found an element of the kernel
return V[l]
return None
def computeQ(P):
# only for Z/pZ[X] square-free polynomials, for p prime
p = P[0].n
# We ignore the image of 1 because (F-Id)(1) = 0
M = [Polynomial(([ModInt(0, p)] * (i * p)) + [ModInt(1, p)]) % P
for i in range(1, P.deg)]
# M -= Id
for i in range(1, P.deg):
M[i - 1] -= Polynomial([ModInt(0, p)] * i + [ModInt(1, p)])
# We find an element of the kernel by Gaussian elimination
pQ = Polynomial.gaussianElimKer(M, ModInt(0, p), ModInt(1, p))
# We put back the 1 that was removed
return pQ.shift(1) if pQ is not None else None
def factor_unit(P):
"""
Berlekamp's algorithm
only in Z/pZ
"""
assert all(isinstance(c, ModInt) for c in P)
assert len(set(c.n for c in P)) == 1
if P.deg == 1:
return defaultdict(int, {P: 1})
|
polynom.py
a * x + b * y = gcd(a, b)
"""
A, B = a, b
sa, sb = (1 if a >= 0 else -1), (1 if b >= 0 else -1)
xp, yp = 1, 0
x, y = 0, 1
while b:
assert A * xp + B * yp == a
assert A * x + B * y == b
r = a // b
a, b = b, a % b
x, xp = xp - r * x, x
y, yp = yp - r * y, y
return sa * xp, sb * yp
def __repr__(self):
return '%s(%s, %s)' % (self.__class__.__name__, self.v, self.n)
def __str__(self):
return '%s' % self.v
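# --- illustrative sketch (added; not part of the original file) ---
# extended_euclid returns Bézout coefficients x, y with a*x + b*y == gcd(a, b).
if __name__ == "__main__":
    _x, _y = ModInt.extended_euclid(240, 46)
    assert 240 * _x + 46 * _y == 2   # gcd(240, 46) == 2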
class Polynomial:
"""
Generic class for polynomials
Works with int, float and ModInt
"""
def __len__(self):
return len(self.C)
def trim(C):
i = len(C) - 1
while i >= 0 and not C[i]:
i -= 1
return C[:i + 1]
def __init__(self, C=None):
if C is None:
C = []
self.C = Polynomial.trim(C)
@property
def deg(self):
return len(self.C) - 1
def prime(self): return Polynomial([i * self[i]
for i in range(1, len(self))])
def eval(self, x):
if not self:
return 0
v = self[-1]
for c in self[-2::-1]:
v = v * x + c
return v
def shift(self, d): return Polynomial(
[0 * self[0]] * d + self.C if self else [])
def __eq__(P, Q):
return P.deg == Q.deg and all(cP == cQ for cP, cQ in zip(P, Q))
def __hash__(self):
return hash(tuple(self.C))
def __call__(self, x): return Polynomial.eval(self, x)
def __getitem__(self, x): return self.C[x]
def __neg__(P): return Polynomial([-c for c in P.C])
def __add__(P, Q):
if len(P.C) < len(Q.C):
P, Q = Q, P
return Polynomial([P[d] + Q[d] for d in range(len(Q))] + P[len(Q):])
def __sub__(P, Q): return P + (-Q)
def _mulpoly(P, Q):
assert isinstance(Q, Polynomial)
return Polynomial([sum(P[k] * Q[d - k]
for k in range(max(0, d + 1 - len(Q)),
min(d + 1, len(P)))
) for d in range(len(P) + len(Q) - 1)])
def _mulscal(P, k):
return Polynomial([k * c for c in P])
def __mul__(P, Q):
if isinstance(Q, Polynomial):
return P._mulpoly(Q)
return P._mulscal(Q)
def __rmul__(P, Q):
return P * Q
def __pow__(P, k):
assert isinstance(k, int)
V = 1
A = P
while k:
if k & 1:
V *= A
k >>= 1
if not k:
break
A *= A
return V
def __iter__(self):
yield from self.C
def euclidean_division(A, B):
Q = [0 * B[0]] * max(0, len(A) - len(B) + 1)
while len(A.C) >= len(B.C):
Q[len(A.C) - len(B.C)] = A[-1] / B[-1]
A -= B.shift(len(A) - len(B)) * (A[-1] / B[-1])
return Polynomial(Q), A
def __floordiv__(A, B):
assert isinstance(B, Polynomial)
return A.euclidean_division(B)[0]
def __mod__(A, B):
"""
Polynomial Euclidean division
or modular reduction
"""
if isinstance(B, Polynomial):
return A.euclidean_division(B)[1]
else:
assert isinstance(B, int)
assert all(isinstance(c, int) for c in A)
return A.reduceP(B)
def __lt__(A, B): return A.deg < B.deg
def __bool__(self): return bool(self.C)
def gcd(A, B):
while B:
A, B = B, A % B
return A * (1 / A[-1])
@staticmethod
def gaussianElimKer(M, zero, one):
"""
Outputs an element of the kernel of M
zero and one are elements of the same field
"""
# V satisfies the invariant
# M = V M_0
V = [Polynomial([zero] * i + [one]) for i in range(len(M))]
pivots = [None] * (len(M) + 1)
for l in range(len(M)):
while M[l].deg >= 0:
idp = M[l].deg
if pivots[idp] is None:
pivots[idp] = l
break
else:
c = M[l][idp] / M[pivots[idp]][idp]
M[l] -= c * M[pivots[idp]]
V[l] -= c * V[pivots[idp]]
else:
# If a line is null, we found an element of the kernel
return V[l]
return None
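# Illustrative worked example (added; not in the original file): over Z/5Z,
# with rows M = [Polynomial([ModInt(1, 5)]), Polynomial([ModInt(2, 5)])],
# gaussianElimKer(M, ModInt(0, 5), ModInt(1, 5)) returns Polynomial([3, 1])
# (with ModInt coefficients), i.e. 3*M[0] + 1*M[1] ≡ 0 (mod 5).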
def computeQ(P):
# only for Z/pZ[X] square-free polynomials, for p prime
p = P[0].n
# We ignore the image of 1 because (F-Id)(1) = 0
M = [Polynomial(([ModInt(0, p)] * (i * p)) + [ModInt(1, p)]) % P
for i in range(1, P.deg)]
# M -= Id
for i in range(1, P.deg):
M[i - 1] -= Polynomial([ModInt(0, p)] * i + [ModInt(1, p)])
# We find an element of the kernel by Gaussian elimination
pQ = Polynomial.gaussianElimKer(M, ModInt(0, p), ModInt(1, p))
# We put back the 1 that was removed
return pQ.shift(1) if pQ is not None else None
def factor_unit(P):
"""
Berlekamp's algorithm
only in Z/pZ
"""
assert all(isinstance(c, ModInt) for c in P)
assert len(set(c.n for c in P)) == 1
if P.deg == 1:
return defaultdict(int, {P: 1})
p = P[0].n
S = Polynomial.gcd(P, P.prime())
if S.deg == P.deg:
# P' = 0 so P = R^p
R = Polynomial(P.C[::p])
return defaultdict(int,
{D: p * v
for D, v in Polynomial.factor_unit(R).items()})
else:
factors = defaultdict(int)
if S.deg:
for D, v in S.factor_unit().items():
factors[D] += v
P //= S
# P is now square-free
# We look for Q in Ker(F-Id) \ {1}
Q = Polynomial.computeQ(P)
if Q is None:
# P is irreducible
factors[P] += 1
else:
# P is the product of the gcd(P, Q-i)
# that are factored recursively
for i in range(p):
D = Polynomial.gcd(P, Q - Polynomial([ModInt(i, p)]))
if D.deg:
for DD, v in D.factor_unit().items():
factors[DD] += v
return factors
def factor(P):
"""
Factorization of P
only in Z/pZ
"""
cd = P[-1]
if P.deg == 0:
return (cd, defaultdict(int))
P = P * (1 / cd)
return (cd, P.factor_unit())
@staticmethod
def ppfactors(fz):
c, Ds = fz
a = str(c) if not Ds or c * c != c else ''
l = [a] + [(str(D) if D.deg == 1 and not D[0] else ('(%s)' % D))
+ (v > 1) * ('^%s' % v)
for D, v in sorted(Ds.items(),
key=lambda e: (e[0].deg, e[1]))]
return '⋅'.join(i for i in l if i)
def reduceP(P, p):
return Polynomial([ModInt(c, p) for c in P])
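# --- illustrative sketch (added; not part of the original file) ---
# End-to-end use of the classes above: reduce an integer polynomial mod a
# prime, then factor it with Berlekamp's algorithm (factor / factor_unit).
# Coefficients are listed from lowest to highest degree; this assumes the
# module's own top-of-file imports (e.g. collections.defaultdict) are present.
if __name__ == "__main__":
    p = 5
    P = Polynomial([-1, 0, 1]) % p   # X^2 - 1 over Z/5Z (uses reduceP)
    lead, factors = P.factor()
    # X^2 - 1 ≡ (X + 1)(X + 4) (mod 5): expect two distinct degree-1 factors.
    assert sum(factors.values()) == 2
    assert all(D.deg == 1 for D in factors)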