| column    | dtype        | stats             |
|-----------|--------------|-------------------|
| file_name | large_string | lengths 4–140     |
| prefix    | large_string | lengths 0–12.1k   |
| suffix    | large_string | lengths 0–12k     |
| middle    | large_string | lengths 0–7.51k   |
| fim_type  | large_string | 4 values          |
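The records below preview individual examples: each consists of a file_name, the prefix / suffix / middle string fields, and a fim_type label, and all four observed labels (identifier_body, conditional_block, identifier_name, random_line_split) appear. As a minimal sketch of how such a row can be consumed, assuming the fields are plain strings, the helpers below reassemble the original file and lay the row out in one common fill-in-the-middle order; the toy row and the <PRE>/<SUF>/<MID> markers are illustrative placeholders, not the dataset's own content or any model's special tokens.

```python
# Minimal sketch: consume one row of this prefix/suffix/middle dataset.
# The toy row and the sentinel strings are illustrative assumptions.

def assemble_source(row: dict) -> str:
    """Reconstruct the original file text: prefix + middle + suffix."""
    return row["prefix"] + row["middle"] + row["suffix"]


def to_fim_string(row: dict, pre="<PRE>", suf="<SUF>", mid="<MID>") -> str:
    """Lay one row out in a common fill-in-the-middle order:
    prefix, then suffix, then the middle target. Real models would use
    their own special tokens instead of these placeholder markers."""
    return f"{pre}{row['prefix']}{suf}{row['suffix']}{mid}{row['middle']}"


if __name__ == "__main__":
    row = {  # toy row shaped like the columns above
        "file_name": "index.js",
        "prefix": "const HOURS_TO_MILLISECONDS = ",
        "middle": "3600 * 1000",
        "suffix": ";\n",
        "fim_type": "random_line_split",
    }
    print(assemble_source(row))  # -> const HOURS_TO_MILLISECONDS = 3600 * 1000;
    print(to_fim_string(row))
```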
file_name: index.js
const HOURS_TO_MILLISECONDS = 3600 * 1000; const client = new Discord.Client(); const converter = new showdown.Converter(); // use Keyv with sqlite storage const sqlite_uri = "sqlite://db.sqlite3"; const discordUserId2token = new Keyv(sqlite_uri, { namespace: "discord_user_id_to_token" }); // Discord User-ID / token pairs const token2nethzHash = new Keyv(sqlite_uri, { namespace: "token_to_nethz_hash" }); // nethz / token pairs const verifiedNethzHashs = new Keyv(sqlite_uri, { namespace: "verified_nethz_hashs" }); // the set of hashs of nethzs already used for verification (only the keys are relevant; value is always `true`) discordUserId2token.on('error', err => console.error('Keyv connection error:', err)); token2nethzHash.on('error', err => console.error('Keyv connection error:', err)); verifiedNethzHashs.on('error', err => console.error('Keyv connection error:', err)); client.login(config.token); const botMail = config.transportOptions.auth; const sampleNethz = "jsmith"; const sampleToken = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9"; const sampleDiscordUsername = "john_sm_01"; const availableCommandsStr = `Available commands: \`!ping\`: make me say Pong \`!nethz\`: tell me your nethz; e.g \`!nethz ${sampleNethz}\` \`!token\`: tell me the token I sent you; e.g \`!token ${sampleToken}\` \`!welcomeagain\`: **print the welcome message again, with all the instructions for the verification process** \`!help\`: print this message `; const adminCommandsStr = `Admin-only commands: \`!unmark\` (admin only): unmark a nethz as "already used for verification"; e.g \`!unmark ${sampleNethz}\` \`!mark\` (admin only): mark a nethz as "already used for verification"; e.g \`!mark ${sampleNethz}\` \`!purgereqs\` (admin only): delete all active tokens, by clearing discordUserId2token and token2nethzHash WARNING: this leads to unexpected behaviour from the point of view of users who are pending verification... \`!purgemarks\` (admin only): unmark all nethzs, by clearing verifiedNethzHashs. WARNING: doing this is rarely a good idea... \`!verify\` (admin only): manually verify a user; e.g \`!verify @${sampleDiscordUsername}\` \`!adminhelp\` (admin only): print this message (Note: admin commands are only used in the admin channel #${config.adminChannelName}, whereas normal commands are only used in DM channels.) `; const welcomeMsg = (guildName) => `Hello! I see you just joined the server **${guildName}**. You are currently not verified as an ETH student on **${guildName}**, so you only have access to a restricted number of channels. To verify yourself as an ETH student, 1. please tell me your nethz (i.e ETH username) in the following format: \`!nethz \` + your nethz; e.g: \`!nethz ${sampleNethz}\` 2. I will send an email at <nethz>@student.ethz.ch containing a token 3. then, show me that you did receive the token, by telling me: \`!token \` + the token; e.g: \`!token ${sampleToken}\` Remarks: - To reset the process, e.g if you misspelled your nethz, just do step 1 again. (I will invalidate the previous token, don't worry.) - My email address, which I will use in step 2, is ${botMail.user}; please check in your spam folder if you don't receive anything. (Note that no human will check the inbox of ${botMail.user}, except for debugging.) - Once you receive the email, you have ${config.tokenTTL} hours to accomplish step 3, as the token expires after that duration. - I will store a salted hash of your nethz in database. (This is to prevent a student from verifying multiple Discord accounts.) 
I will *not* keep track of which Discord account your nethz corresponds to, and vice-versa. I am a very stupid bot. If you have any questions or encounter any problem, please send a message to an admin of **${guildName}** directly. `; const genMailContent = (discordUsername, token, guildName, botName) => `Hello, \n You have recently joined the Discord server **${guildName}**, under the username **${discordUsername}**, and provided your nethz (i.e ETH username) for verification.\n To finish the verification process, please check your Direct Message channel with me (**${botName}**) and send me the following token within ${config.tokenTTL} hours: \n ${token}\n If you did not join the Discord server **${guildName}** and tell me your nethz, then someone else provided your nethz. Then you don't need to do anything; the token will expire in ${config.tokenTTL} hours.\n Note that I am a Discord bot and that this email was autogenerated, so please don't reply to it. (You can reply if you really want to but no human will ever see it.)\n If you really need to, you can always contact ${config.emergencyContact.fullName}, your fellow ETH student who runs the Discord server **${guildName}**.\n \nBest regards,\n ${botName} `; // create reusable transporter object using the default SMTP transport const transporter = nodemailer.createTransport(config.transportOptions); // verify connection configuration transporter.verify(function (error, success) { if (error) { console.log(error); } else { console.assert(success); console.log("SMTP server is ready to take our messages"); } }); client.once('ready', async () => { const theGuild = client.guilds.cache.get(config.theGuildId); if (!theGuild.available) { console.warn("theGuild.available is false (it indicates a server outage)"); } // check that the bot can read/write in the config.adminChannelName channel const adminChannel = theGuild.channels.cache.find(channel => channel.name === config.adminChannelName); const readWritePerms = ['VIEW_CHANNEL', 'SEND_MESSAGES']; if (!theGuild.me.permissionsIn(adminChannel).has(readWritePerms)) { throw Error(`bot doesn't have read/write permission in admin channel ${config.adminChannelName}`); } // create role config.roleName if does not exist if (!theGuild.roles.cache.some(role => role.name === config.roleName)) { theGuild.createRole({ name: config.roleName }) .then(role => console.log(`Created new role with name ${role.name} and color ${role.color}`)) .catch(console.error); } // check that we can send email const textContent = `yo yo yo this is a test email. The bot "${client.user.username}" was just started on host ${hostname}.`; const info = await transporter.sendMail({ from: { name: client.user.username, address: botMail.user }, to: botMail.user, subject: `Test email (${client.user.username} bot startup)`, text: textContent, html: converter.makeHtml(textContent.replace('\n', '\n\n')) }); console.log("Message sent: %s", info.messageId); console.log('Ready!'); }); const prefix = config.prefix; client.on('message', async message => { if (message.author.bot) return; if (message.channel.type === 'text' && message.channel.guild.id === config.theGuildId && message.channel.name === config.adminChannelName) { if (!message.content.startsWith(prefix)) return; const args = message.content.slice(prefix.length).split(/ +/); const command = args.shift().toLowerCase(); if (command === 'unmark') { if (!args.length) { return message.channel.send(`You didn't provide any nethz! 
Usage: e.g \`!unmark ${sampleNethz}\``); } else if (args.length > 1) { return message.channel.send(`You provided too many arguments... Usage: e.g \`!unmark ${sampleNethz}\``); } else { const nethz = args[0].toLowerCase(); const nethzHash = sha512(nethz, config.commonSalt); if (! await verifiedNethzHashs.get
{ // The id is the first and only match found by the RegEx. const matches = mention.match(/^<@!?(\d+)>$/); // If supplied variable was not a mention, matches will be null instead of an array. if (!matches) return; // However the first element in the matches array will be the entire mention, not just the ID, so use index 1. const id = matches[1]; return client.users.cache.get(id); }
fim_type: identifier_body
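For orientation, the sketch below shows the kind of span each fim_type value appears to hide, inferred from the preview itself (for example, identifier_name rows further down carry bare middles such as `__init__` and `generator`, while random_line_split rows carry a single line cut out of the file). The toy function and this reading of the category names are assumptions, not dataset documentation.

```python
# Toy illustration of the four fim_type categories on one small function.
# The split semantics below are inferred from the preview rows, not taken
# from dataset documentation.

SOURCE = "def total(xs):\n    if not xs:\n        return 0\n    return sum(xs)\n"

EXAMPLES = {
    # middle is a bare name
    "identifier_name": ("def ", "total", "(xs):\n    if not xs:\n        return 0\n    return sum(xs)\n"),
    # middle is a whole definition body
    "identifier_body": ("def total(xs):\n", "    if not xs:\n        return 0\n    return sum(xs)\n", ""),
    # middle is the block guarded by a condition
    "conditional_block": ("def total(xs):\n    if not xs:\n", "        return 0\n", "    return sum(xs)\n"),
    # middle starts at an arbitrary split point inside a line
    "random_line_split": ("def total(xs):\n    if not", " xs:\n        return 0\n", "    return sum(xs)\n"),
}

for fim_type, (prefix, middle, suffix) in EXAMPLES.items():
    # every split must reassemble into the same source text
    assert prefix + middle + suffix == SOURCE, fim_type
    print(f"{fim_type}: middle = {middle!r}")
```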
file_name: chart_area.py
chart_tools.format_axis_y(ax=ax, p_dict=P_DICT, k_dict=K_DICT, logger=LOG) for area in range(1, 9, 1): suppress_area = P_DICT.get(f"suppressArea{area}", False) # If the area is suppressed, remind the user they suppressed it. if suppress_area: LOG['Info'].append( f"[{CHART_NAME}] Area {area} is suppressed by user setting. You can re-enable it " f"in the device configuration menu." ) # ============================== Plot the Areas =============================== # Plot the areas. If suppress_area is True, we skip it. if P_DICT[f'area{area}Source'] not in ("", "None") and not suppress_area: # If area color is the same as the background color, alert the user. if P_DICT[f'area{area}Color'] == P_DICT['backgroundColor'] and not suppress_area: LOG['Warning'].append( f"[{CHART_NAME}] Area {area} color is the same as the background color (so " f"you may not be able to see it)." ) data_path = PLUG_DICT['dataPath'] area_source = P_DICT[f'area{area}Source'] data_column = chart_tools.get_data(data_source=f'{data_path}{area_source}', logger=LOG) if PLUG_DICT['verboseLogging']: LOG['Threaddebug'].append(f"[{CHART_NAME}] Data for Area {area}: {data_column}") # Pull the headers P_DICT['headers'].append(data_column[0][1]) del data_column[0] # Pull the observations into distinct lists for charting. for element in data_column: P_DICT[f'x_obs{area}'].append(element[0]) P_DICT[f'y_obs{area}'].append(float(element[1])) # ============================= Adjustment Factor ============================= # Allows user to shift data on the Y axis (for example, to display multiple binary # sources on the same chart.) if PROPS[f'area{area}adjuster'] != "": temp_list = [] for obs in P_DICT[f'y_obs{area}']: expr = f"{obs}{PROPS[f'area{area}adjuster']}" temp_list.append(chart_tools.eval_expr(expr=expr)) P_DICT[f'y_obs{area}'] = temp_list # ================================ Prune Data ================================= # Prune the data if warranted dates_to_plot = P_DICT[f'x_obs{area}'] try: limit = float(PROPS['limitDataRangeLength']) except ValueError: limit = 0 if limit > 0: y_obs = P_DICT[f'y_obs{area}'] new_old = PROPS['limitDataRange'] x_index = f'x_obs{area}' y_index = f'y_obs{area}' P_DICT[x_index], P_DICT[y_index] = chart_tools.prune_data( x_data=dates_to_plot, y_data=y_obs, limit=limit, new_old=new_old, logger=LOG ) # ======================== Convert Dates for Charting ========================= P_DICT[f'x_obs{area}'] = \ chart_tools.format_dates(list_of_dates=P_DICT[f'x_obs{area}'], logger=LOG) _ = [P_DICT['data_array'].append(node) for node in P_DICT[f'y_obs{area}']] # We need to plot all the stacks at once, so we create some tuples to hold the data we # need later. Y_OBS_TUPLE += (P_DICT[f'y_obs{area}'],) Y_COLORS_TUPLE += (P_DICT[f'area{area}Color'],) X_OBS = P_DICT[f'x_obs{area}'] # ================================ Annotations ================================ # New annotations code begins here - DaveL17 2019-06-05 for _ in range(1, area + 1, 1): tup = () # We start with the ordinal list and create a tuple to hold all the lists that # come before it. 
for k in range(_, 0, -1): tup += (P_DICT[f'y_obs{area}'],) # The relative value is the sum of each list element plus the ones that come before # it (i.e., tup[n][0] + tup[n-1][0] + tup[n-2][0] Y_OBS_TUPLE_REL[f'y_obs{area}'] = [sum(t) for t in zip(*tup)] annotate = P_DICT[f'area{area}Annotate'] precision = int(PROPS.get(f'area{area}AnnotationPrecision', "0")) if annotate: for xy in zip(P_DICT[f'x_obs{area}'], Y_OBS_TUPLE_REL[f'y_obs{area}']): ax.annotate( f"{float(xy[1]):.{precision}f}", xy=xy, xytext=(0, 0), zorder=10, **K_DICT['k_annotation'] ) y_data = chart_tools.hide_anomalies(data=Y_OBS_TUPLE[0], props=PROPS, logger=LOG) ax.stackplot( X_OBS, Y_OBS_TUPLE, edgecolor=None, colors=Y_COLORS_TUPLE, zorder=10, lw=0, **K_DICT['k_line'] ) # ============================== Y1 Axis Min/Max ============================== # Min and Max are not 'None'. The p_dict['data_array'] contains individual data points and # doesn't take into account the additive nature of the plot. Therefore, we get the axis scaling # values from the plot and then use those for min/max. _ = [P_DICT['data_array'].append(node) for node in ax.get_ylim()] chart_tools.format_axis_y1_min_max(p_dict=P_DICT, logger=LOG) # Transparent Chart Fill if P_DICT['transparent_charts'] and P_DICT['transparent_filled']: ax.add_patch( patches.Rectangle( (0, 0), 1, 1, transform=ax.transAxes, facecolor=P_DICT['faceColor'], zorder=1 ) ) # ================================== Legend =================================== if P_DICT['showLegend']: # Amend the headers if there are any custom legend entries defined. counter = 1 final_headers = [] headers = P_DICT['headers'] # headers = [_ for _ in P_DICT['headers']] # headers = [_.decode('utf-8') for _ in P_DICT['headers']] for header in headers: if P_DICT[f'area{counter}Legend'] == "": final_headers.append(header) else: final_headers.append(P_DICT[f'area{counter}Legend']) counter += 1 # Set the legend # Reorder the headers and colors so that they fill by row instead of by column num_col = int(P_DICT['legendColumns']) iter_headers = itertools.chain(*[final_headers[i::num_col] for i in range(num_col)]) final_headers = list(iter_headers) iter_colors = itertools.chain(*[Y_COLORS_TUPLE[i::num_col] for i in range(num_col)]) final_colors = list(iter_colors) # Note that the legend does not support the PolyCollection created by the stackplot. # Therefore, we have to use a proxy artist. https://stackoverflow.com/a/14534830/2827397 p1 = patches.Rectangle((0, 0), 1, 1) p2 = patches.Rectangle((0, 0), 1, 1) legend = ax.legend( [p1, p2], final_headers, loc='upper center', bbox_to_anchor=(0.5, -0.15), ncol=num_col, prop={'size': float(P_DICT['legendFontSize'])} ) # Set legend font color _ = [text.set_color(P_DICT['fontColor']) for text in legend.get_texts()] # Set legend area color num_handles = len(legend.legendHandles) _ = [legend.legendHandles[_].set_color(final_colors[_]) for _ in range(0, num_handles)] frame = legend.get_frame() frame.set_alpha(0) for area in range(1, 9, 1):
suppress_area = P_DICT.get(f'suppressArea{area}', False) if P_DICT[f'area{area}Source'] not in ("", "None") and not suppress_area: # Note that we do these after the legend is drawn so that these areas don't affect the # legend. # We need to reload the dates to ensure that they match the area being plotted # dates_to_plot = self.format_dates(p_dict[f'x_obs{area}']) # =============================== Best Fit Line =============================== if PROPS.get(f'line{area}BestFit', False): chart_tools.format_best_fit_line_segments( ax=ax, dates_to_plot=P_DICT[f'x_obs{area}'], line=area, p_dict=P_DICT, logger=LOG ) _ = [P_DICT['data_array'].append(node) for node in P_DICT[f'y_obs{area}']]
fim_type: conditional_block
file_name: chart_area.py
(): ... try: ax = chart_tools.make_chart_figure( width=P_DICT['chart_width'], height=P_DICT['chart_height'], p_dict=P_DICT ) chart_tools.format_axis_x_ticks(ax=ax, p_dict=P_DICT, k_dict=K_DICT, logger=LOG) chart_tools.format_axis_y(ax=ax, p_dict=P_DICT, k_dict=K_DICT, logger=LOG) for area in range(1, 9, 1): suppress_area = P_DICT.get(f"suppressArea{area}", False) # If the area is suppressed, remind the user they suppressed it. if suppress_area: LOG['Info'].append( f"[{CHART_NAME}] Area {area} is suppressed by user setting. You can re-enable it " f"in the device configuration menu." ) # ============================== Plot the Areas =============================== # Plot the areas. If suppress_area is True, we skip it. if P_DICT[f'area{area}Source'] not in ("", "None") and not suppress_area: # If area color is the same as the background color, alert the user. if P_DICT[f'area{area}Color'] == P_DICT['backgroundColor'] and not suppress_area: LOG['Warning'].append( f"[{CHART_NAME}] Area {area} color is the same as the background color (so " f"you may not be able to see it)." ) data_path = PLUG_DICT['dataPath'] area_source = P_DICT[f'area{area}Source'] data_column = chart_tools.get_data(data_source=f'{data_path}{area_source}', logger=LOG) if PLUG_DICT['verboseLogging']: LOG['Threaddebug'].append(f"[{CHART_NAME}] Data for Area {area}: {data_column}") # Pull the headers P_DICT['headers'].append(data_column[0][1]) del data_column[0] # Pull the observations into distinct lists for charting. for element in data_column: P_DICT[f'x_obs{area}'].append(element[0]) P_DICT[f'y_obs{area}'].append(float(element[1])) # ============================= Adjustment Factor ============================= # Allows user to shift data on the Y axis (for example, to display multiple binary # sources on the same chart.) if PROPS[f'area{area}adjuster'] != "": temp_list = [] for obs in P_DICT[f'y_obs{area}']: expr = f"{obs}{PROPS[f'area{area}adjuster']}" temp_list.append(chart_tools.eval_expr(expr=expr)) P_DICT[f'y_obs{area}'] = temp_list # ================================ Prune Data ================================= # Prune the data if warranted dates_to_plot = P_DICT[f'x_obs{area}'] try: limit = float(PROPS['limitDataRangeLength']) except ValueError: limit = 0 if limit > 0: y_obs = P_DICT[f'y_obs{area}'] new_old = PROPS['limitDataRange'] x_index = f'x_obs{area}' y_index = f'y_obs{area}' P_DICT[x_index], P_DICT[y_index] = chart_tools.prune_data( x_data=dates_to_plot, y_data=y_obs, limit=limit, new_old=new_old, logger=LOG ) # ======================== Convert Dates for Charting ========================= P_DICT[f'x_obs{area}'] = \ chart_tools.format_dates(list_of_dates=P_DICT[f'x_obs{area}'], logger=LOG) _ = [P_DICT['data_array'].append(node) for node in P_DICT[f'y_obs{area}']] # We need to plot all the stacks at once, so we create some tuples to hold the data we # need later. Y_OBS_TUPLE += (P_DICT[f'y_obs{area}'],) Y_COLORS_TUPLE += (P_DICT[f'area{area}Color'],) X_OBS = P_DICT[f'x_obs{area}'] # ================================ Annotations ================================ # New annotations code begins here - DaveL17 2019-06-05 for _ in range(1, area + 1, 1): tup = () # We start with the ordinal list and create a tuple to hold all the lists that # come before it. 
for k in range(_, 0, -1): tup += (P_DICT[f'y_obs{area}'],) # The relative value is the sum of each list element plus the ones that come before # it (i.e., tup[n][0] + tup[n-1][0] + tup[n-2][0] Y_OBS_TUPLE_REL[f'y_obs{area}'] = [sum(t) for t in zip(*tup)] annotate = P_DICT[f'area{area}Annotate'] precision = int(PROPS.get(f'area{area}AnnotationPrecision', "0")) if annotate: for xy in zip(P_DICT[f'x_obs{area}'], Y_OBS_TUPLE_REL[f'y_obs{area}']): ax.annotate( f"{float(xy[1]):.{precision}f}", xy=xy, xytext=(0, 0), zorder=10, **K_DICT['k_annotation'] ) y_data = chart_tools.hide_anomalies(data=Y_OBS_TUPLE[0], props=PROPS, logger=LOG) ax.stackplot( X_OBS, Y_OBS_TUPLE, edgecolor=None, colors=Y_COLORS_TUPLE, zorder=10, lw=0, **K_DICT['k_line'] ) # ============================== Y1 Axis Min/Max ============================== # Min and Max are not 'None'. The p_dict['data_array'] contains individual data points and # doesn't take into account the additive nature of the plot. Therefore, we get the axis scaling # values from the plot and then use those for min/max. _ = [P_DICT['data_array'].append(node) for node in ax.get_ylim()] chart_tools.format_axis_y1_min_max(p_dict=P_DICT, logger=LOG) # Transparent Chart Fill if P_DICT['transparent_charts'] and P_DICT['transparent_filled']: ax.add_patch( patches.Rectangle( (0, 0), 1, 1, transform=ax.transAxes, facecolor=P_DICT['faceColor'], zorder=1 ) ) # ================================== Legend =================================== if P_DICT['showLegend']: # Amend the headers if there are any custom legend entries defined. counter = 1 final_headers = [] headers = P_DICT['headers'] # headers = [_ for _ in P_DICT['headers']] # headers = [_.decode('utf-8') for _ in P_DICT['headers']] for header in headers: if P_DICT[f'area{counter}Legend'] == "": final_headers.append(header) else: final_headers.append(P_DICT[f'area{counter}Legend']) counter += 1 # Set the legend # Reorder the headers and colors so that they fill by row instead of by column num_col = int(P_DICT['legendColumns']) iter_headers = itertools.chain(*[final_headers[i::num_col] for i in range(num_col)]) final_headers = list(iter_headers) iter_colors = itertools.chain(*[Y_COLORS_TUPLE[i::num_col] for i in range(num_col)]) final_colors = list(iter_colors) # Note that the legend does not support the PolyCollection created by the stackplot. # Therefore, we have to use a proxy artist. https://stackoverflow.com/a/14534830/2827397 p1 = patches.Rectangle((0, 0), 1, 1) p2 = patches.Rectangle((0, 0), 1, 1) legend = ax.legend( [p1, p2], final_headers, loc='upper center', bbox_to_anchor=(0.5, -0.15), ncol=num_col, prop={'size': float(P_DICT['legendFontSize'])} ) # Set legend font color _ = [text.set_color(P_DICT['fontColor']) for text in legend.get_texts()] # Set legend area color num_handles = len(legend.legendHandles) _ = [legend.legendHandles[_].set_color(final_colors[_]) for _ in range(0, num_handles)] frame = legend.get_frame() frame.set_alpha(0) for area in range(1, 9, 1): suppress_area = P_DICT.get(f'suppressArea{area}', False) if P_DICT[f'area{area}Source'] not in ("", "None") and not suppress_area: # Note that we do these after the legend is drawn so that these areas don't affect the # legend. # We need to reload the dates to ensure that they match the area being plotted # dates_to_plot = self.format_dates(p_dict[f'x_obs{area}']) # =============================== Best Fit Line =============================== if PROPS.get(f'line{area}BestFit', False): chart_tools.format_best
middle: __init__
fim_type: identifier_name
file_name: chart_area.py
=K_DICT, logger=LOG) chart_tools.format_axis_y(ax=ax, p_dict=P_DICT, k_dict=K_DICT, logger=LOG) for area in range(1, 9, 1): suppress_area = P_DICT.get(f"suppressArea{area}", False) # If the area is suppressed, remind the user they suppressed it.
# ============================== Plot the Areas =============================== # Plot the areas. If suppress_area is True, we skip it. if P_DICT[f'area{area}Source'] not in ("", "None") and not suppress_area: # If area color is the same as the background color, alert the user. if P_DICT[f'area{area}Color'] == P_DICT['backgroundColor'] and not suppress_area: LOG['Warning'].append( f"[{CHART_NAME}] Area {area} color is the same as the background color (so " f"you may not be able to see it)." ) data_path = PLUG_DICT['dataPath'] area_source = P_DICT[f'area{area}Source'] data_column = chart_tools.get_data(data_source=f'{data_path}{area_source}', logger=LOG) if PLUG_DICT['verboseLogging']: LOG['Threaddebug'].append(f"[{CHART_NAME}] Data for Area {area}: {data_column}") # Pull the headers P_DICT['headers'].append(data_column[0][1]) del data_column[0] # Pull the observations into distinct lists for charting. for element in data_column: P_DICT[f'x_obs{area}'].append(element[0]) P_DICT[f'y_obs{area}'].append(float(element[1])) # ============================= Adjustment Factor ============================= # Allows user to shift data on the Y axis (for example, to display multiple binary # sources on the same chart.) if PROPS[f'area{area}adjuster'] != "": temp_list = [] for obs in P_DICT[f'y_obs{area}']: expr = f"{obs}{PROPS[f'area{area}adjuster']}" temp_list.append(chart_tools.eval_expr(expr=expr)) P_DICT[f'y_obs{area}'] = temp_list # ================================ Prune Data ================================= # Prune the data if warranted dates_to_plot = P_DICT[f'x_obs{area}'] try: limit = float(PROPS['limitDataRangeLength']) except ValueError: limit = 0 if limit > 0: y_obs = P_DICT[f'y_obs{area}'] new_old = PROPS['limitDataRange'] x_index = f'x_obs{area}' y_index = f'y_obs{area}' P_DICT[x_index], P_DICT[y_index] = chart_tools.prune_data( x_data=dates_to_plot, y_data=y_obs, limit=limit, new_old=new_old, logger=LOG ) # ======================== Convert Dates for Charting ========================= P_DICT[f'x_obs{area}'] = \ chart_tools.format_dates(list_of_dates=P_DICT[f'x_obs{area}'], logger=LOG) _ = [P_DICT['data_array'].append(node) for node in P_DICT[f'y_obs{area}']] # We need to plot all the stacks at once, so we create some tuples to hold the data we # need later. Y_OBS_TUPLE += (P_DICT[f'y_obs{area}'],) Y_COLORS_TUPLE += (P_DICT[f'area{area}Color'],) X_OBS = P_DICT[f'x_obs{area}'] # ================================ Annotations ================================ # New annotations code begins here - DaveL17 2019-06-05 for _ in range(1, area + 1, 1): tup = () # We start with the ordinal list and create a tuple to hold all the lists that # come before it. for k in range(_, 0, -1): tup += (P_DICT[f'y_obs{area}'],) # The relative value is the sum of each list element plus the ones that come before # it (i.e., tup[n][0] + tup[n-1][0] + tup[n-2][0] Y_OBS_TUPLE_REL[f'y_obs{area}'] = [sum(t) for t in zip(*tup)] annotate = P_DICT[f'area{area}Annotate'] precision = int(PROPS.get(f'area{area}AnnotationPrecision', "0")) if annotate: for xy in zip(P_DICT[f'x_obs{area}'], Y_OBS_TUPLE_REL[f'y_obs{area}']): ax.annotate( f"{float(xy[1]):.{precision}f}", xy=xy, xytext=(0, 0), zorder=10, **K_DICT['k_annotation'] ) y_data = chart_tools.hide_anomalies(data=Y_OBS_TUPLE[0], props=PROPS, logger=LOG) ax.stackplot( X_OBS, Y_OBS_TUPLE, edgecolor=None, colors=Y_COLORS_TUPLE, zorder=10, lw=0, **K_DICT['k_line'] ) # ============================== Y1 Axis Min/Max ============================== # Min and Max are not 'None'. 
The p_dict['data_array'] contains individual data points and # doesn't take into account the additive nature of the plot. Therefore, we get the axis scaling # values from the plot and then use those for min/max. _ = [P_DICT['data_array'].append(node) for node in ax.get_ylim()] chart_tools.format_axis_y1_min_max(p_dict=P_DICT, logger=LOG) # Transparent Chart Fill if P_DICT['transparent_charts'] and P_DICT['transparent_filled']: ax.add_patch( patches.Rectangle( (0, 0), 1, 1, transform=ax.transAxes, facecolor=P_DICT['faceColor'], zorder=1 ) ) # ================================== Legend =================================== if P_DICT['showLegend']: # Amend the headers if there are any custom legend entries defined. counter = 1 final_headers = [] headers = P_DICT['headers'] # headers = [_ for _ in P_DICT['headers']] # headers = [_.decode('utf-8') for _ in P_DICT['headers']] for header in headers: if P_DICT[f'area{counter}Legend'] == "": final_headers.append(header) else: final_headers.append(P_DICT[f'area{counter}Legend']) counter += 1 # Set the legend # Reorder the headers and colors so that they fill by row instead of by column num_col = int(P_DICT['legendColumns']) iter_headers = itertools.chain(*[final_headers[i::num_col] for i in range(num_col)]) final_headers = list(iter_headers) iter_colors = itertools.chain(*[Y_COLORS_TUPLE[i::num_col] for i in range(num_col)]) final_colors = list(iter_colors) # Note that the legend does not support the PolyCollection created by the stackplot. # Therefore, we have to use a proxy artist. https://stackoverflow.com/a/14534830/2827397 p1 = patches.Rectangle((0, 0), 1, 1) p2 = patches.Rectangle((0, 0), 1, 1) legend = ax.legend( [p1, p2], final_headers, loc='upper center', bbox_to_anchor=(0.5, -0.15), ncol=num_col, prop={'size': float(P_DICT['legendFontSize'])} ) # Set legend font color _ = [text.set_color(P_DICT['fontColor']) for text in legend.get_texts()] # Set legend area color num_handles = len(legend.legendHandles) _ = [legend.legendHandles[_].set_color(final_colors[_]) for _ in range(0, num_handles)] frame = legend.get_frame() frame.set_alpha(0) for area in range(1, 9, 1): suppress_area = P_DICT.get(f'suppressArea{area}', False) if P_DICT[f'area{area}Source'] not in ("", "None") and not suppress_area: # Note that we do these after the legend is drawn so that these areas don't affect the # legend. # We need to reload the dates to ensure that they match the area being plotted # dates_to_plot = self.format_dates(p_dict[f'x_obs{area}']) # =============================== Best Fit Line =============================== if PROPS.get(f'line{area}BestFit', False): chart_tools.format_best_fit_line_segments( ax=ax, dates_to_plot=P_DICT[f'x_obs{area}'], line=area, p_dict=P_DICT, logger=LOG ) _ = [P_DICT['data_array'].append(node) for node in
middle: if suppress_area: LOG['Info'].append( f"[{CHART_NAME}] Area {area} is suppressed by user setting. You can re-enable it " f"in the device configuration menu." )
fim_type: random_line_split
file_name: chart_area.py
try: ax = chart_tools.make_chart_figure( width=P_DICT['chart_width'], height=P_DICT['chart_height'], p_dict=P_DICT ) chart_tools.format_axis_x_ticks(ax=ax, p_dict=P_DICT, k_dict=K_DICT, logger=LOG) chart_tools.format_axis_y(ax=ax, p_dict=P_DICT, k_dict=K_DICT, logger=LOG) for area in range(1, 9, 1): suppress_area = P_DICT.get(f"suppressArea{area}", False) # If the area is suppressed, remind the user they suppressed it. if suppress_area: LOG['Info'].append( f"[{CHART_NAME}] Area {area} is suppressed by user setting. You can re-enable it " f"in the device configuration menu." ) # ============================== Plot the Areas =============================== # Plot the areas. If suppress_area is True, we skip it. if P_DICT[f'area{area}Source'] not in ("", "None") and not suppress_area: # If area color is the same as the background color, alert the user. if P_DICT[f'area{area}Color'] == P_DICT['backgroundColor'] and not suppress_area: LOG['Warning'].append( f"[{CHART_NAME}] Area {area} color is the same as the background color (so " f"you may not be able to see it)." ) data_path = PLUG_DICT['dataPath'] area_source = P_DICT[f'area{area}Source'] data_column = chart_tools.get_data(data_source=f'{data_path}{area_source}', logger=LOG) if PLUG_DICT['verboseLogging']: LOG['Threaddebug'].append(f"[{CHART_NAME}] Data for Area {area}: {data_column}") # Pull the headers P_DICT['headers'].append(data_column[0][1]) del data_column[0] # Pull the observations into distinct lists for charting. for element in data_column: P_DICT[f'x_obs{area}'].append(element[0]) P_DICT[f'y_obs{area}'].append(float(element[1])) # ============================= Adjustment Factor ============================= # Allows user to shift data on the Y axis (for example, to display multiple binary # sources on the same chart.) if PROPS[f'area{area}adjuster'] != "": temp_list = [] for obs in P_DICT[f'y_obs{area}']: expr = f"{obs}{PROPS[f'area{area}adjuster']}" temp_list.append(chart_tools.eval_expr(expr=expr)) P_DICT[f'y_obs{area}'] = temp_list # ================================ Prune Data ================================= # Prune the data if warranted dates_to_plot = P_DICT[f'x_obs{area}'] try: limit = float(PROPS['limitDataRangeLength']) except ValueError: limit = 0 if limit > 0: y_obs = P_DICT[f'y_obs{area}'] new_old = PROPS['limitDataRange'] x_index = f'x_obs{area}' y_index = f'y_obs{area}' P_DICT[x_index], P_DICT[y_index] = chart_tools.prune_data( x_data=dates_to_plot, y_data=y_obs, limit=limit, new_old=new_old, logger=LOG ) # ======================== Convert Dates for Charting ========================= P_DICT[f'x_obs{area}'] = \ chart_tools.format_dates(list_of_dates=P_DICT[f'x_obs{area}'], logger=LOG) _ = [P_DICT['data_array'].append(node) for node in P_DICT[f'y_obs{area}']] # We need to plot all the stacks at once, so we create some tuples to hold the data we # need later. Y_OBS_TUPLE += (P_DICT[f'y_obs{area}'],) Y_COLORS_TUPLE += (P_DICT[f'area{area}Color'],) X_OBS = P_DICT[f'x_obs{area}'] # ================================ Annotations ================================ # New annotations code begins here - DaveL17 2019-06-05 for _ in range(1, area + 1, 1): tup = () # We start with the ordinal list and create a tuple to hold all the lists that # come before it. 
for k in range(_, 0, -1): tup += (P_DICT[f'y_obs{area}'],) # The relative value is the sum of each list element plus the ones that come before # it (i.e., tup[n][0] + tup[n-1][0] + tup[n-2][0] Y_OBS_TUPLE_REL[f'y_obs{area}'] = [sum(t) for t in zip(*tup)] annotate = P_DICT[f'area{area}Annotate'] precision = int(PROPS.get(f'area{area}AnnotationPrecision', "0")) if annotate: for xy in zip(P_DICT[f'x_obs{area}'], Y_OBS_TUPLE_REL[f'y_obs{area}']): ax.annotate( f"{float(xy[1]):.{precision}f}", xy=xy, xytext=(0, 0), zorder=10, **K_DICT['k_annotation'] ) y_data = chart_tools.hide_anomalies(data=Y_OBS_TUPLE[0], props=PROPS, logger=LOG) ax.stackplot( X_OBS, Y_OBS_TUPLE, edgecolor=None, colors=Y_COLORS_TUPLE, zorder=10, lw=0, **K_DICT['k_line'] ) # ============================== Y1 Axis Min/Max ============================== # Min and Max are not 'None'. The p_dict['data_array'] contains individual data points and # doesn't take into account the additive nature of the plot. Therefore, we get the axis scaling # values from the plot and then use those for min/max. _ = [P_DICT['data_array'].append(node) for node in ax.get_ylim()] chart_tools.format_axis_y1_min_max(p_dict=P_DICT, logger=LOG) # Transparent Chart Fill if P_DICT['transparent_charts'] and P_DICT['transparent_filled']: ax.add_patch( patches.Rectangle( (0, 0), 1, 1, transform=ax.transAxes, facecolor=P_DICT['faceColor'], zorder=1 ) ) # ================================== Legend =================================== if P_DICT['showLegend']: # Amend the headers if there are any custom legend entries defined. counter = 1 final_headers = [] headers = P_DICT['headers'] # headers = [_ for _ in P_DICT['headers']] # headers = [_.decode('utf-8') for _ in P_DICT['headers']] for header in headers: if P_DICT[f'area{counter}Legend'] == "": final_headers.append(header) else: final_headers.append(P_DICT[f'area{counter}Legend']) counter += 1 # Set the legend # Reorder the headers and colors so that they fill by row instead of by column num_col = int(P_DICT['legendColumns']) iter_headers = itertools.chain(*[final_headers[i::num_col] for i in range(num_col)]) final_headers = list(iter_headers) iter_colors = itertools.chain(*[Y_COLORS_TUPLE[i::num_col] for i in range(num_col)]) final_colors = list(iter_colors) # Note that the legend does not support the PolyCollection created by the stackplot. # Therefore, we have to use a proxy artist. https://stackoverflow.com/a/14534830/2827397 p1 = patches.Rectangle((0, 0), 1, 1) p2 = patches.Rectangle((0, 0), 1, 1) legend = ax.legend( [p1, p2], final_headers, loc='upper center', bbox_to_anchor=(0.5, -0.15), ncol=num_col, prop={'size': float(P_DICT['legendFontSize'])} ) # Set legend font color _ = [text.set_color(P_DICT['fontColor']) for text in legend.get_texts()] # Set legend area color num_handles = len(legend.legendHandles) _ = [legend.legendHandles[_].set_color(final_colors[_]) for _ in range(0, num_handles)] frame = legend.get_frame() frame.set_alpha(0) for area in range(1, 9, 1): suppress_area = P_DICT.get(f'suppressArea{area}', False) if P_DICT[f'area{area}Source'] not in ("", "None") and not suppress_area: # Note that we do these after the legend is drawn so that these areas don't affect the # legend. # We need to reload the dates to ensure that they match the area being plotted # dates_to_plot = self.format_dates(p_dict[f'x_obs{area}']) # =============================== Best Fit Line =============================== if PROPS.get(f'line{area}BestFit', False): chart_tools.format_best_fit_line_segments(
...
fim_type: identifier_body
file_name: AudioManager.js
. 有新audioId if (audioId && audioId != this.getId()){ this.change(audioId) return } //2. 当前播放音频地址未定义 if (_util.isUndefined(this._audio.audioUrl)) { this._setState("error", { code: -1, error: "暂无播放内容" }) return } //3.从分享直接进入播放页面,如果有在其他小程序播放音频,重新加载播放 if (_util.isUndefined(this._am.src)) { this._play(this.getId()) return } //4. 暂停 if (!this.isPlaying()) { this.pause() return } //5. 播放 this._setAudio(this._audio) } //播放/暂停 play(_callback) { //1. 切换新课节 if (this._isNewLessonId(_callback)) { if (!this._am.paused) this._am.stop() //停止正在播放音频 this._play(_callback.lessonId) return } //2. 当前播放音频地址未定义 let audioUrl = this._audio.audioUrl if (_util.isUndefined(audioUrl)) { this._setState("error", { code: -1, error: "暂无播放内容" }) return } //3.从分享直接进入播放页面,如果有在其他小程序播放音频,重新加载播放 let src = this._am.src if (_util.isUndefined(src)) { this._play(this.getId()) return } // 4. 暂停 if (!this._am.paused) { this._am.pause() return } // 5. 播放 this._setAudio(this._audio) } //切换音频 change(audioId) { // if (!this._am.paused) this._am.stop() //停止正在播放音频 if (this._hasLessonId(audioId)) { this._play(audioId) } else { _ } } //上一首 previous(_callback) { let audioId = this._audio.preLessonId if (!_util.isRealNum(audioId) || audioId < 0) { if (this._hasFailure(_callback)) { _callback.failure("已是第一个!") } return } this._setState("previous") if (this._hasSuccess(_callback)) _callback.success() this._play(audioId) } //下一首 next(_callback) { let audioId = this._audio.nextLessonId if (!_util.isRealNum(audioId) || audioId < 0) { if (this._hasFailure(_callback)) _callback.failure("已是最后一个") return } this._setState("next") if (this._hasSuccess(_callback)) _callback.success() this._play(audioId) } //快进 forward(_callback) { var time = this._hasSeekTime(_callback) if (this.isPlaying()) { let start = this.getProgress() + time start = start > this.getDuration() ? this.getDuration() : start this._am.seek(start) } } //快退 fastback(_callback) { var time = this._hasSeekTime(_callback) if (this.isPlaying()) { let start = this.getProgress() - time start = start <= 0 ? 0 : start this._am.seek(start) } } //应用被回调onHide(home健)后,通知栏关闭播放或其他小程序播放操作,手动同步播放状态 syncState(callback) { let that = this wx.getBackgroundAudioPlayerState({ success: res => { let status = res.status let dataUrl = res.dataUrl //播放URL是未定义时,表示停止状态 if (_util.isUndefined(that._audio.audioUrl)) that._state = "stop" //如果status=0表示 暂停或停止, 如果原先是停止状态就是停止状态,否则就是暂停状态 if (0 == status) that._state = "stop" == that.getState() ? "stop" : "pause" that._isPaused = (0 == status) //回调状态,更新UI if (!_util.isUndefined(callback)) callback(that.getState()) //清除进度定时器,并上传进度 if (0 == status) that._updateProgress(that.getState()) }, fail: (msg) => { //如果进入其他小程序播放音频后,会进入此方法 that._state = "stop" that._isPaused = true //回调状态,更新UI if (!_util.isUndefined(callback)) callback(that.getState()) //清除进度定时器 that._clearInter() } }) } //获取当前课节音频信息 getLessonAudio() { return this._audio } //更新当前课节对象 setLessonAudio(audio) { this._audio = audio } //更新课节音频数据 updateAudio(audioId) { return this._loadData(audioId) } //获取当前课节Id getId() { return this._audio.id } //播放模式 order:顺序播放 random:随机播放 single:单个播放 setPlayMode(mode) { this._mode = mode } //当前播放状态 默认:'default', 播放:'play', 暂停:'pause', 停止:'stop', 结束:'end', 加载:'loading' getState() { return this._state; } // 是否正在播放 isPlaying() { return !this._isPaused } // 音频是否在播放 isCurrentId(audioId){ return audioId && audioId === this.getId() } //获取暂停的时间 getProgress() { return this._currTime } //获取当前音频时长 getDuration() { return this._am.duration <= 0 ? 
this._audio.totalTime : this._am.duration } //播放拖动某位置 seekTo(value) { this._am.seek(value) } //停止 stop() { if(this.isPlaying()){ this._am.stop() }else{ this._setState("stop") } } //暂停 pause() { if ("play"===this._state) { this._am.pause() } else { this._setState("pause") } } //清除进度上传定时器 _clearInter() { if (_util.isUndefined(this._interval)) clearInterval(this.interval) } //定时上传播放进度 _uploadProgress(self, data) { if (null == self._am.src) return //暂停,停止,播放结束 必须通过参数上传进度 if (!_util.isUndefined(data)) { if (data.time <= 1) return self._postProgress(data) return } //上传正在播放的音频进度 let time = parseInt(self._currTime) if (time <= 1) return let id = self.getId() self._postProgress({ lessonId: id, time: time }) } //上传播放进度 _postProgress(data) { _network.POST(UPLOAD_URL, { params: { lessonId: data.lessonId, listenTime: data.time }, success: res => { console.log("time:" + data.time + " id:" + data.lessonId) }, fail: msg => { console.log("上传播放时间失败:" + msg) } }) } //通过课节ID从服务器获取音频相关数据 _loadData(_lessonId) { // Dialog.showLoading('加载中...') // if (this._isPlayPage(_util.currPage())) this._setState("loading") return new Promise((resolve, reject) => { _network.GET(AUDIO_URL, { params: { lessonId: _lessonId }, success: res => resolve(res), fail: msg => reject(msg), // complete: () => { if (!this._isPlayPage()) Dialog.hideLoading() } }) }).then(res => { this._setState("success", res.data) this._audio = res.data.data return res.data.data }, msg => { let errCode = (msg === "您还未购买该专栏") ? 1201:-1 this._setState("failure", { code: errCode, error: msg }) this.pause() }) } //加载
音频数据 _play(_lessonId) { this._loadData(_lessonId) .then(audio => { if (_util.isUndefined(audio)) return //暂停正在播放音频 // if (!this._am.paused) this._am.stop() let learnTime = _util.isRealNum(audio.learnTime) ? parseInt(audio.learnTime) : 0 let totalTime = _util.isRealNum(audio.totalTime) ? parseInt(audio.totalTime) : 0 //判断当前所在的是不是播放页面 // let isPl
fim_type: identifier_body
file_name: AudioManager.js
:'play', 暂停:'pause', 停止:'stop', 结束:'end', 加载:'loading' getState() { return this._state; } // 是否正在播放 isPlaying() { return !this._isPaused } // 音频是否在播放 isCurrentId(audioId){ return audioId && audioId === this.getId() } //获取暂停的时间 getProgress() { return this._currTime } //获取当前音频时长 getDuration() { return this._am.duration <= 0 ? this._audio.totalTime : this._am.duration } //播放拖动某位置 seekTo(value) { this._am.seek(value) } //停止 stop() { if(this.isPlaying()){ this._am.stop() }else{ this._setState("stop") } } //暂停 pause() { if ("play"===this._state) { this._am.pause() } else { this._setState("pause") } } //清除进度上传定时器 _clearInter() { if (_util.isUndefined(this._interval)) clearInterval(this.interval) } //定时上传播放进度 _uploadProgress(self, data) { if (null == self._am.src) return //暂停,停止,播放结束 必须通过参数上传进度 if (!_util.isUndefined(data)) { if (data.time <= 1) return self._postProgress(data) return } //上传正在播放的音频进度 let time = parseInt(self._currTime) if (time <= 1) return let id = self.getId() self._postProgress({ lessonId: id, time: time }) } //上传播放进度 _postProgress(data) { _network.POST(UPLOAD_URL, { params: { lessonId: data.lessonId, listenTime: data.time }, success: res => { console.log("time:" + data.time + " id:" + data.lessonId) }, fail: msg => { console.log("上传播放时间失败:" + msg) } }) } //通过课节ID从服务器获取音频相关数据 _loadData(_lessonId) { // Dialog.showLoading('加载中...') // if (this._isPlayPage(_util.currPage())) this._setState("loading") return new Promise((resolve, reject) => { _network.GET(AUDIO_URL, { params: { lessonId: _lessonId }, success: res => resolve(res), fail: msg => reject(msg), // complete: () => { if (!this._isPlayPage()) Dialog.hideLoading() } }) }).then(res => { this._setState("success", res.data) this._audio = res.data.data return res.data.data }, msg => { let errCode = (msg === "您还未购买该专栏") ? 1201:-1 this._setState("failure", { code: errCode, error: msg }) this.pause() }) } //加载音频数据 _play(_lessonId) { this._loadData(_lessonId) .then(audio => { if (_util.isUndefined(audio)) return //暂停正在播放音频 // if (!this._am.paused) this._am.stop() let learnTime = _util.isRealNum(audio.learnTime) ? parseInt(audio.learnTime) : 0 let totalTime = _util.isRealNum(audio.totalTime) ? parseInt(audio.totalTime) : 0 //判断当前所在的是不是播放页面 // let isPlayPage = _util.currPageRoute() == "audio/pages/lesson_audio/lesson_audio" //上次已播放结束 // if (isPlayPage && learnTime + 1 >= totalTime) { // Dialog.showPlayEnd(audio.title, // { // comfirm: () => { this._setAudio(audio) }, // cancel: () => { this._audio = audio } // }) // //如果上次播放时间大于1秒,则进行接着上次播放继续提示 // } else if (isPlayPage && learnTime >= 2) { // Dialog.shotProgress(learnTime, // { // comfirm: () => { this._currTime = learnTime; this._setAudio(audio) }, // cancel: () => this._audio = audio // }) // } else { // this._setAudio(audio) // } this._currTime = learnTime >= totalTime - 1 ? 
0 : learnTime //检查网络 this._checkNetworkType({ confirm: () => {this._setAudio(audio) }, cancel: () => { } }) }) } //设置音频数据 _setAudio(audio) { if (_util.isUndefined(audio) || _util.isUndefined(audio.audioUrl)) { this._setState("error", { code: -1, error: "暂无播放内容" }) return } this._am.src = audio.audioUrl this._am.seek(this._currTime) this._am.startTime = this._currTime this._am.title = audio.title // this._am.epname = audio.title // this._am.singer = audio.title this._am.coverImgUrl = audio.coverPic this._audio = audio } //设置状态 _setState(_state, res) { if (!_util.isUndefined(res)) { this._state = _state this._stateListeners.map(_listener => { _listener(this._state, res) }) } //如果当前音频地址与正在播放的地址不一致(播放错乱或其他小程序正在播放) else { // if (null != this._am.src && this._am.src != this._audio.audioUrl) { // _state = "stop" // } this._state = _state this._stateListeners.map(_listener => { _listener(this._state) }) } this._isPaused = "play" != this._state && "wait" != this._state this._updateProgress(this._state) } //更新进度 _updateProgress(state) { if (_util.isUndefined(this._interval)) clearInterval(this.interval) switch(state){ case "stop": case "pause": case "end": case "next": case "previous": this._uploadProgress(this, { lessonId: this.getId(), time: parseInt(this._currTime) }) break case "play": this.interval = setInterval(this._uploadProgress, 5000, this) break } } //初始化网络监听和选择时播放状态 _checkNetworkType(_callback) { //当前网络是否是移动网络 wx.getNetworkType({ success: res => { if (_util.mobileNetwork(res.networkType) && wx.getStorageSync("tip")) { _util.networkModal({ confirm: () => _callback.confirm(), cancel: () => _callback.cancel() }) wx.setStorageSync("tip", false) } else { _callback.confirm() } } }) //网络状态改变监听 wx.onNetworkStatusChange(res => { this._networkChange(res) }) } //网络状态改变 _networkChange(res) { if (!res.isConnected) util.toast("网络已断开") // let currNetworkType = res.networkType; // let lastNetworkType = wx.getStorageSync("networkType") // if (audioManager.isPlaying() && util.mobileNetwork(currNetworkType)) { // if (lastNetworkType != currNetworkType) { // util.networkModal({ cancel: () => { audioManager.play() } }) // } // } // wx.setStorageSync("networkType", currNetworkType) } //回调是否定义,且返回快进秒数 _hasSeekTime(_callback) { return !_util.isUndefined(_callback) && !_util.isUndefined(_callback.time) ? _callback.time : 15 } //回调是否定义,且回调有音频位置 _isNewLessonId(_callback) { return !_util.isUndefined(_callback) && !_util.isUndefined(_callback.lessonId) && _callback.lessonId != this._audio.id } //回调是否定义,且回调有课节音频ID _hasLessonId(lessonId) { return !_util.isUndefined(lessonId) } //回调是否定义,且回调有success函数 _hasSuccess(_callback) { return !_util.isUndefined(_callback) && !_util.isUndefined(_callback.success) } //回调是否定义,且回调有failure函数 _hasFailure(_callback) { return !_util.isUndefined(_callback) && !_util.isUndefined(_callback.failure) } //当前页面是不是播放音频页面 _isPlayPage(page) { return page.route == "audio/pages/lesson_audio/lesson_audio" } //重置音频数据 _resetAudioData() { this._currTime = 0 } } export { AudioManager }
fim_type: identifier_name
file_name: AudioManager.js
this._setState("stop") this._resetAudioData() }) //完成 this._am.onEnded(() => { this._setState("end") this._resetAudioData() this.next() }) //出错 this._am.onError((error) => { this._resetAudioData() let errCode = error.errCode; let errMsg = errorList[errCode.toString()] this._setState("error", { code: errCode, error: errMsg }) }) //缓冲 this._am.onWaiting(() => { Dialog.showLoading('缓冲中...') this._setState("wait") }) //上一 this._am.onPrev(() => { this.previous() }) //下一 this._am.onNext(() => { this.next() }) //进度 this._am.onTimeUpdate(() => { if (this._am.paused) return // if ("play" != this._state && "wait" != this._state) { // this._state = "play" // this._stateListeners.map(_listener => { _listener(this._state) }) // Dialog.hideLoading() // } this._currTime = this._am.currentTime let playTime = this._am.currentTime let duration = this._am.duration let totalTime = null == duration ? this._audio.totalTime : duration this._timeListeners.map(_listener => { _listener(playTime, totalTime) }) }) } //设置音频播放列表 setLessonId(lessonId) { this._currLessonId = lessonId this._play(lessonId) } //设置开始播放时间 setCurrStartTime(time) { this._currTime = time } //播放状态监听 addStateListener(_listener) { this._stateListeners.push(_listener) } //添加时间进度监听 addTimeListener(_listener) { this._timeListeners.push(_listener) } //播放状态监听 removeStateListener(_listener) { this._stateListeners.splice(this._stateListeners.indexOf(_listener), 1) } //移除进度监听 removeTimeListener(_listener) { this._timeListeners.splice(this._timeListeners.indexOf(_listener), 1) } //强制播放 forcePlay(audioId){ if(!audioId) return //播放正在播放的音频 if (audioId === this.getId() && this.isPlaying()){ return } //播放暂停/停止的音频 if (audioId === this.getId() && "pause" === this._state){ this._setAudio(this._audio) return } //切换音频 this.change(audioId) } /** * 通过audioId播放音频 */ playByAudioId(audioId){ if(!audioId && !this.getId()) return //1. 有新audioId if (audioId && audioId != this.getId()){ this.change(audioId) return } //2. 当前播放音频地址未定义 if (_util.isUndefined(this._audio.audioUrl)) { this._setState("error", { code: -1, error: "暂无播放内容" }) return } //3.从分享直接进入播放页面,如果有在其他小程序播放音频,重新加载播放 if (_util.isUndefined(this._am.src)) { this._play(this.getId()) return } //4. 暂停 if (!this.isPlaying()) { this.pause() return } //5. 播放 this._setAudio(this._audio) } //播放/暂停 play(_callback) { //1. 切换新课节 if (this._isNewLessonId(_callback)) { if (!this._am.paused) this._am.stop() //停止正在播放音频 this._play(_callback.lessonId) return } //2. 当前播放音频地址未定义 let audioUrl = this._audio.audioUrl
this._setState("error", { code: -1, error: "暂无播放内容" }) return } //3.从分享直接进入播放页面,如果有在其他小程序播放音频,重新加载播放 let src = this._am.src if (_util.isUndefined(src)) { this._play(this.getId()) return } // 4. 暂停 if (!this._am.paused) { this._am.pause() return } // 5. 播放 this._setAudio(this._audio) } //切换音频 change(audioId) { // if (!this._am.paused) this._am.stop() //停止正在播放音频 if (this._hasLessonId(audioId)) { this._play(audioId) } else { _ } } //上一首 previous(_callback) { let audioId = this._audio.preLessonId if (!_util.isRealNum(audioId) || audioId < 0) { if (this._hasFailure(_callback)) { _callback.failure("已是第一个!") } return } this._setState("previous") if (this._hasSuccess(_callback)) _callback.success() this._play(audioId) } //下一首 next(_callback) { let audioId = this._audio.nextLessonId if (!_util.isRealNum(audioId) || audioId < 0) { if (this._hasFailure(_callback)) _callback.failure("已是最后一个") return } this._setState("next") if (this._hasSuccess(_callback)) _callback.success() this._play(audioId) } //快进 forward(_callback) { var time = this._hasSeekTime(_callback) if (this.isPlaying()) { let start = this.getProgress() + time start = start > this.getDuration() ? this.getDuration() : start this._am.seek(start) } } //快退 fastback(_callback) { var time = this._hasSeekTime(_callback) if (this.isPlaying()) { let start = this.getProgress() - time start = start <= 0 ? 0 : start this._am.seek(start) } } //应用被回调onHide(home健)后,通知栏关闭播放或其他小程序播放操作,手动同步播放状态 syncState(callback) { let that = this wx.getBackgroundAudioPlayerState({ success: res => { let status = res.status let dataUrl = res.dataUrl //播放URL是未定义时,表示停止状态 if (_util.isUndefined(that._audio.audioUrl)) that._state = "stop" //如果status=0表示 暂停或停止, 如果原先是停止状态就是停止状态,否则就是暂停状态 if (0 == status) that._state = "stop" == that.getState() ? "stop" : "pause" that._isPaused = (0 == status) //回调状态,更新UI if (!_util.isUndefined(callback)) callback(that.getState()) //清除进度定时器,并上传进度 if (0 == status) that._updateProgress(that.getState()) }, fail: (msg) => { //如果进入其他小程序播放音频后,会进入此方法 that._state = "stop" that._isPaused = true //回调状态,更新UI if (!_util.isUndefined(callback)) callback(that.getState()) //清除进度定时器 that._clearInter() } }) } //获取当前课节音频信息 getLessonAudio() { return this._audio } //更新当前课节对象 setLessonAudio(audio) { this._audio = audio } //更新课节音频数据 updateAudio(audioId) { return this._loadData(audioId) } //获取当前课节Id getId() { return this._audio.id } //播放模式 order:顺序播放 random:随机播放 single:单个播放 setPlayMode(mode) { this._mode = mode } //当前播放状态 默认:'default', 播放:'play', 暂停:'pause', 停止:'stop', 结束:'end', 加载:'loading' getState() { return this._state; } // 是否正在播放 isPlaying() { return !this._isPaused } // 音频是否在播放 isCurrentId(audioId){ return audioId && audioId === this.getId() } //获取暂停的时间 getProgress() { return this._currTime } //获取当前音频时长 getDuration() { return this._am.duration <= 0 ? this._audio.totalTime : this._am.duration } //播放拖动某位置 seekTo(value) { this._am.seek(value) } //停止
middle: if (_util.isUndefined(audioUrl)) {
fim_type: random_line_split
file_name: main.py
(Model): #NUEVA CLASE CREADA @saving.allow_read_from_gcs def load_weights_new(self, filepath, skip_mismatch=False, reshape=False): """Loads all layer weights from a HDF5 save file. If `by_name` is False (default) weights are loaded based on the network's topology, meaning the architecture should be the same as when the weights were saved. Note that layers that don't have weights are not taken into account in the topological ordering, so adding or removing layers is fine as long as they don't have weights. If `by_name` is True, weights are loaded into layers only if they share the same name. This is useful for fine-tuning or transfer-learning models where some of the layers have changed. # Arguments filepath: String, path to the weights file to load. by_name: Boolean, whether to load weights by name or by topological order. skip_mismatch: Boolean, whether to skip loading of layers where there is a mismatch in the number of weights, or a mismatch in the shape of the weight (only valid when `by_name`=True). reshape: Reshape weights to fit the layer when the correct number of weight arrays is present but their shape does not match. # Raises ImportError: If h5py is not available. """ with h5py.File(filepath, mode='r') as f: if 'layer_names' not in f.attrs and 'model_weights' in f: f = f['model_weights'] #Nueva funcion desarrollada weights_proc.load_weights_from_hdf5_group_new(f, self.layers, reshape=reshape) if hasattr(f, 'close'): f.close() elif hasattr(f.file, 'close'): f.file.close() #class TensorNew(Layer): # '''We have the calls to add_weight(), and then call the super's build()''' # def __init__(self): # super().__init__() # self.tensor_shape = self. #EN CONSTRUCCION # # def numpy(self): # return tf.make_ndarray(self) ##////////////////////////////////////////////////////////////////// def
(b_s, phase_gen='train'): if phase_gen == 'train': images = [imgs_train_path + f for f in os.listdir(imgs_train_path) if f.endswith(('.jpg', '.jpeg', '.png'))] maps = [maps_train_path + f for f in os.listdir(maps_train_path) if f.endswith(('.jpg', '.jpeg', '.png'))] fixs = [fixs_train_path + f for f in os.listdir(fixs_train_path) if f.endswith('.mat')] elif phase_gen == 'val': images = [imgs_val_path + f for f in os.listdir(imgs_val_path) if f.endswith(('.jpg', '.jpeg', '.png'))] maps = [maps_val_path + f for f in os.listdir(maps_val_path) if f.endswith(('.jpg', '.jpeg', '.png'))] fixs = [fixs_val_path + f for f in os.listdir(fixs_val_path) if f.endswith('.mat')] else: raise NotImplementedError images.sort() maps.sort() fixs.sort() gaussian = np.zeros((b_s, nb_gaussian, shape_r_gt, shape_c_gt)) counter = 0 while True: Y = preprocess_maps(maps[counter:counter+b_s], shape_r_out, shape_c_out) Y_fix = preprocess_fixmaps(fixs[counter:counter + b_s], shape_r_out, shape_c_out) yield [preprocess_images(images[counter:counter + b_s], shape_r, shape_c), gaussian], [Y, Y, Y_fix] counter = (counter + b_s) % len(images) def generator_test(b_s, imgs_test_path): images = [imgs_test_path + "\\" + f for f in os.listdir(imgs_test_path) if f.endswith(('.jpg', '.jpeg', '.png'))] images.sort() gaussian = np.zeros((b_s, nb_gaussian, shape_r_gt, shape_c_gt)) #counter = 0 #while True: # yield [preprocess_images(images[counter:counter + b_s], shape_r, shape_c), gaussian] # counter = (counter + b_s) % len(images) #Funciona para b_s = 1 NUEVA VERSION! counter = 0 while counter < len(images): print("Ejecutado generator_test para la imagen ", counter + 1) yield [preprocess_images(images[counter:counter + b_s], shape_r, shape_c), gaussian] counter = counter + 1 if __name__ == '__main__': #if len(sys.argv) == 1: #Ejecucion por consola de windows if len(listaArg) == 1: #Ejecucion por codigo raise NotImplementedError else: #phase = sys.argv[1] #Ejecucion por consola de windows phase = listaArg[1] #Ejecucion por codigo #x = Input((3, shape_r, shape_c)) x = Input((shape_r, shape_c, 3)) #Nueva version x_maps = Input((nb_gaussian, shape_r_gt, shape_c_gt)) #x_maps = Input((shape_r_gt, shape_c_gt, nb_gaussian)) #Nueva version if version == 0: #NO USADO # m = Model(input=[x, x_maps], output=sam_vgg([x, x_maps])) print("Not Compiling SAM-VGG") #Nueva version # m.compile(RMSprop(lr=1e-4), loss=[kl_divergence, correlation_coefficient, nss]) elif version == 1: '''Hint of the problem: something is not the output of a keras layer. You should put it in a lambda layer When invoking the Model API, the value for outputs argument should be tensor(or list of tensors), in this case it is a list of list of tensors, hence there is a problem''' #m = Model(input=[x, x_maps], output=sam_resnet([x, x_maps])) #m = Model(inputs=[x, x_maps], outputs=sam_resnet([x, x_maps])) #New version m = ModelAux(inputs=[x, x_maps], outputs=sam_resnet([x, x_maps])) #Final version print("Compiling SAM-ResNet") m.compile(RMSprop(lr=1e-4), loss=[kl_divergence, correlation_coefficient, nss]) print("Compilado") else: raise NotImplementedError if phase == 'train': if nb_imgs_train % b_s != 0 or nb_imgs_val % b_s != 0: print("The number of training and validation images should be a multiple of the batch size. 
Please change your batch size in config.py accordingly.") exit() if version == 0: print("Training SAM-VGG") m.fit_generator(generator(b_s=b_s), nb_imgs_train, nb_epoch=nb_epoch, validation_data=generator(b_s=b_s, phase_gen='val'), nb_val_samples=nb_imgs_val, callbacks=[EarlyStopping(patience=3), ModelCheckpoint('weights.sam-vgg.{epoch:02d}-{val_loss:.4f}.pkl', save_best_only=True)]) elif version == 1: print("Training SAM-ResNet") m.fit_generator(generator(b_s=b_s), nb_imgs_train, nb_epoch=nb_epoch, validation_data=generator(b_s=b_s, phase_gen='val'), nb_val_samples=nb_imgs_val, callbacks=[EarlyStopping(patience=3), ModelCheckpoint('weights.sam-resnet.{epoch:02d}-{val_loss:.4f}.pkl', save_best_only=True)]) elif phase == "test": # Output Folder Path output_folder = 'predictions/' #if len(sys.argv) < 2: #Ejecucion por consola de windows # raise SyntaxError #imgs_test_path = sys.argv[2] if len(listaArg) < 2: #Ejecucion por codigo raise SyntaxError imgs_test_path = listaArg[2] file_names = [f for f in os.listdir(imgs_test_path) if f.endswith(('.jpg', '.jpeg', '.png'))] file_names.sort() nb_imgs_test = len(file_names) if nb_imgs_test % b_s != 0: print("The number of test images should be a multiple of the batch size. Please change your batch size in config.py accordingly.") exit() if version == 0: #NO ACTIVA print("Not Loading SAM-VGG weights") #m.load_weights('weights/sam-vgg_salicon_weights.pkl') elif version == 1: # for i in range(len(m.layers)): # print('____________________________________________') # nro = i # print(i) # print(m.layers[nro]) # weight = m.layers[nro].get_weights() # if(len(weight) == 0): # print
generator
identifier_name
main.py
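The docstring recovered in the record above explains the two weight-loading modes: topological order (the default) versus matching layers by name, which is what makes fine-tuning and transfer learning across slightly changed architectures practical. As a minimal, hypothetical sketch (the model layout and the 'pretrained.h5' filename are illustrative, not taken from this project):

# Hypothetical sketch: load weights by layer name so only layers whose names
# appear in the saved file receive weights; new layers keep their fresh init.
from keras.models import Model
from keras.layers import Input, Dense

inp = Input((128,))
h = Dense(64, activation='relu', name='shared_dense')(inp)   # name present in the saved file
out = Dense(10, activation='softmax', name='new_head')(h)    # new layer, left untouched
model = Model(inputs=inp, outputs=out)

# skip_mismatch only applies with by_name=True, as the docstring above notes.
model.load_weights('pretrained.h5', by_name=True, skip_mismatch=True)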
Aux(Model): #NUEVA CLASE CREADA @saving.allow_read_from_gcs def load_weights_new(self, filepath, skip_mismatch=False, reshape=False): """Loads all layer weights from a HDF5 save file. If `by_name` is False (default) weights are loaded based on the network's topology, meaning the architecture should be the same as when the weights were saved. Note that layers that don't have weights are not taken into account in the topological ordering, so adding or removing layers is fine as long as they don't have weights. If `by_name` is True, weights are loaded into layers only if they share the same name. This is useful for fine-tuning or transfer-learning models where some of the layers have changed. # Arguments filepath: String, path to the weights file to load. by_name: Boolean, whether to load weights by name or by topological order. skip_mismatch: Boolean, whether to skip loading of layers where there is a mismatch in the number of weights, or a mismatch in the shape of the weight (only valid when `by_name`=True). reshape: Reshape weights to fit the layer when the correct number of weight arrays is present but their shape does not match. # Raises ImportError: If h5py is not available. """ with h5py.File(filepath, mode='r') as f: if 'layer_names' not in f.attrs and 'model_weights' in f: f = f['model_weights'] #Nueva funcion desarrollada weights_proc.load_weights_from_hdf5_group_new(f, self.layers, reshape=reshape) if hasattr(f, 'close'): f.close() elif hasattr(f.file, 'close'): f.file.close() #class TensorNew(Layer): # '''We have the calls to add_weight(), and then call the super's build()''' # def __init__(self): # super().__init__() # self.tensor_shape = self. #EN CONSTRUCCION # # def numpy(self): # return tf.make_ndarray(self) ##////////////////////////////////////////////////////////////////// def generator(b_s, phase_gen='train'): if phase_gen == 'train': images = [imgs_train_path + f for f in os.listdir(imgs_train_path) if f.endswith(('.jpg', '.jpeg', '.png'))] maps = [maps_train_path + f for f in os.listdir(maps_train_path) if f.endswith(('.jpg', '.jpeg', '.png'))] fixs = [fixs_train_path + f for f in os.listdir(fixs_train_path) if f.endswith('.mat')] elif phase_gen == 'val': images = [imgs_val_path + f for f in os.listdir(imgs_val_path) if f.endswith(('.jpg', '.jpeg', '.png'))] maps = [maps_val_path + f for f in os.listdir(maps_val_path) if f.endswith(('.jpg', '.jpeg', '.png'))] fixs = [fixs_val_path + f for f in os.listdir(fixs_val_path) if f.endswith('.mat')] else: raise NotImplementedError images.sort() maps.sort() fixs.sort() gaussian = np.zeros((b_s, nb_gaussian, shape_r_gt, shape_c_gt)) counter = 0 while True: Y = preprocess_maps(maps[counter:counter+b_s], shape_r_out, shape_c_out) Y_fix = preprocess_fixmaps(fixs[counter:counter + b_s], shape_r_out, shape_c_out) yield [preprocess_images(images[counter:counter + b_s], shape_r, shape_c), gaussian], [Y, Y, Y_fix] counter = (counter + b_s) % len(images) def generator_test(b_s, imgs_test_path): images = [imgs_test_path + "\\" + f for f in os.listdir(imgs_test_path) if f.endswith(('.jpg', '.jpeg', '.png'))] images.sort() gaussian = np.zeros((b_s, nb_gaussian, shape_r_gt, shape_c_gt)) #counter = 0 #while True: # yield [preprocess_images(images[counter:counter + b_s], shape_r, shape_c), gaussian] # counter = (counter + b_s) % len(images) #Funciona para b_s = 1 NUEVA VERSION! 
counter = 0 while counter < len(images): print("Ejecutado generator_test para la imagen ", counter + 1) yield [preprocess_images(images[counter:counter + b_s], shape_r, shape_c), gaussian] counter = counter + 1 if __name__ == '__main__': #if len(sys.argv) == 1: #Ejecucion por consola de windows if len(listaArg) == 1: #Ejecucion por codigo raise NotImplementedError else: #phase = sys.argv[1] #Ejecucion por consola de windows phase = listaArg[1] #Ejecucion por codigo #x = Input((3, shape_r, shape_c)) x = Input((shape_r, shape_c, 3)) #Nueva version x_maps = Input((nb_gaussian, shape_r_gt, shape_c_gt)) #x_maps = Input((shape_r_gt, shape_c_gt, nb_gaussian)) #Nueva version if version == 0: #NO USADO # m = Model(input=[x, x_maps], output=sam_vgg([x, x_maps])) print("Not Compiling SAM-VGG") #Nueva version # m.compile(RMSprop(lr=1e-4), loss=[kl_divergence, correlation_coefficient, nss]) elif version == 1: '''Hint of the problem: something is not the output of a keras layer. You should put it in a lambda layer When invoking the Model API, the value for outputs argument should be tensor(or list of tensors), in this case it is a list of list of tensors, hence there is a problem''' #m = Model(input=[x, x_maps], output=sam_resnet([x, x_maps])) #m = Model(inputs=[x, x_maps], outputs=sam_resnet([x, x_maps])) #New version m = ModelAux(inputs=[x, x_maps], outputs=sam_resnet([x, x_maps])) #Final version print("Compiling SAM-ResNet")
print("Compilado") else: raise NotImplementedError if phase == 'train': if nb_imgs_train % b_s != 0 or nb_imgs_val % b_s != 0: print("The number of training and validation images should be a multiple of the batch size. Please change your batch size in config.py accordingly.") exit() if version == 0: print("Training SAM-VGG") m.fit_generator(generator(b_s=b_s), nb_imgs_train, nb_epoch=nb_epoch, validation_data=generator(b_s=b_s, phase_gen='val'), nb_val_samples=nb_imgs_val, callbacks=[EarlyStopping(patience=3), ModelCheckpoint('weights.sam-vgg.{epoch:02d}-{val_loss:.4f}.pkl', save_best_only=True)]) elif version == 1: print("Training SAM-ResNet") m.fit_generator(generator(b_s=b_s), nb_imgs_train, nb_epoch=nb_epoch, validation_data=generator(b_s=b_s, phase_gen='val'), nb_val_samples=nb_imgs_val, callbacks=[EarlyStopping(patience=3), ModelCheckpoint('weights.sam-resnet.{epoch:02d}-{val_loss:.4f}.pkl', save_best_only=True)]) elif phase == "test": # Output Folder Path output_folder = 'predictions/' #if len(sys.argv) < 2: #Ejecucion por consola de windows # raise SyntaxError #imgs_test_path = sys.argv[2] if len(listaArg) < 2: #Ejecucion por codigo raise SyntaxError imgs_test_path = listaArg[2] file_names = [f for f in os.listdir(imgs_test_path) if f.endswith(('.jpg', '.jpeg', '.png'))] file_names.sort() nb_imgs_test = len(file_names) if nb_imgs_test % b_s != 0: print("The number of test images should be a multiple of the batch size. Please change your batch size in config.py accordingly.") exit() if version == 0: #NO ACTIVA print("Not Loading SAM-VGG weights") #m.load_weights('weights/sam-vgg_salicon_weights.pkl') elif version == 1: # for i in range(len(m.layers)): # print('____________________________________________') # nro = i # print(i) # print(m.layers[nro]) # weight = m.layers[nro].get_weights() # if(len(weight) == 0): # print
m.compile(RMSprop(lr=1e-4), loss=[kl_divergence, correlation_coefficient, nss])
random_line_split
main.py
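The triple-quoted hint embedded in this record describes a common Keras pitfall: every entry passed to the Model API's outputs argument must be a Keras tensor produced by a layer, so raw backend ops have to be wrapped in a Lambda layer. A small, self-contained sketch of that fix (the l2-normalisation op here is only an example, not code from this project):

# Hypothetical sketch: wrap a raw backend op in a Lambda layer so its result
# is a proper Keras tensor and can be passed to Model(outputs=...).
from keras.models import Model
from keras.layers import Input, Lambda
import keras.backend as K

x = Input((32,))

# K.l2_normalize(x, axis=-1) by itself is not the output of a Keras layer,
# but the same op wrapped in Lambda is:
y = Lambda(lambda t: K.l2_normalize(t, axis=-1), name='l2_norm')(x)

m = Model(inputs=x, outputs=y)
m.summary()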
Aux(Model): #NUEVA CLASE CREADA @saving.allow_read_from_gcs def load_weights_new(self, filepath, skip_mismatch=False, reshape=False): """Loads all layer weights from a HDF5 save file. If `by_name` is False (default) weights are loaded based on the network's topology, meaning the architecture should be the same as when the weights were saved. Note that layers that don't have weights are not taken into account in the topological ordering, so adding or removing layers is fine as long as they don't have weights. If `by_name` is True, weights are loaded into layers only if they share the same name. This is useful for fine-tuning or transfer-learning models where some of the layers have changed. # Arguments filepath: String, path to the weights file to load. by_name: Boolean, whether to load weights by name or by topological order. skip_mismatch: Boolean, whether to skip loading of layers where there is a mismatch in the number of weights, or a mismatch in the shape of the weight (only valid when `by_name`=True). reshape: Reshape weights to fit the layer when the correct number of weight arrays is present but their shape does not match. # Raises ImportError: If h5py is not available. """ with h5py.File(filepath, mode='r') as f: if 'layer_names' not in f.attrs and 'model_weights' in f: f = f['model_weights'] #Nueva funcion desarrollada weights_proc.load_weights_from_hdf5_group_new(f, self.layers, reshape=reshape) if hasattr(f, 'close'): f.close() elif hasattr(f.file, 'close'): f.file.close() #class TensorNew(Layer): # '''We have the calls to add_weight(), and then call the super's build()''' # def __init__(self): # super().__init__() # self.tensor_shape = self. #EN CONSTRUCCION # # def numpy(self): # return tf.make_ndarray(self) ##////////////////////////////////////////////////////////////////// def generator(b_s, phase_gen='train'): if phase_gen == 'train': images = [imgs_train_path + f for f in os.listdir(imgs_train_path) if f.endswith(('.jpg', '.jpeg', '.png'))] maps = [maps_train_path + f for f in os.listdir(maps_train_path) if f.endswith(('.jpg', '.jpeg', '.png'))] fixs = [fixs_train_path + f for f in os.listdir(fixs_train_path) if f.endswith('.mat')] elif phase_gen == 'val': images = [imgs_val_path + f for f in os.listdir(imgs_val_path) if f.endswith(('.jpg', '.jpeg', '.png'))] maps = [maps_val_path + f for f in os.listdir(maps_val_path) if f.endswith(('.jpg', '.jpeg', '.png'))] fixs = [fixs_val_path + f for f in os.listdir(fixs_val_path) if f.endswith('.mat')] else: raise NotImplementedError images.sort() maps.sort() fixs.sort() gaussian = np.zeros((b_s, nb_gaussian, shape_r_gt, shape_c_gt)) counter = 0 while True: Y = preprocess_maps(maps[counter:counter+b_s], shape_r_out, shape_c_out) Y_fix = preprocess_fixmaps(fixs[counter:counter + b_s], shape_r_out, shape_c_out) yield [preprocess_images(images[counter:counter + b_s], shape_r, shape_c), gaussian], [Y, Y, Y_fix] counter = (counter + b_s) % len(images) def generator_test(b_s, imgs_test_path): images = [imgs_test_path + "\\" + f for f in os.listdir(imgs_test_path) if f.endswith(('.jpg', '.jpeg', '.png'))] images.sort() gaussian = np.zeros((b_s, nb_gaussian, shape_r_gt, shape_c_gt)) #counter = 0 #while True: # yield [preprocess_images(images[counter:counter + b_s], shape_r, shape_c), gaussian] # counter = (counter + b_s) % len(images) #Funciona para b_s = 1 NUEVA VERSION! 
counter = 0 while counter < len(images): print("Ejecutado generator_test para la imagen ", counter + 1) yield [preprocess_images(images[counter:counter + b_s], shape_r, shape_c), gaussian] counter = counter + 1 if __name__ == '__main__': #if len(sys.argv) == 1: #Ejecucion por consola de windows if len(listaArg) == 1: #Ejecucion por codigo raise NotImplementedError else: #phase = sys.argv[1] #Ejecucion por consola de windows phase = listaArg[1] #Ejecucion por codigo #x = Input((3, shape_r, shape_c)) x = Input((shape_r, shape_c, 3)) #Nueva version x_maps = Input((nb_gaussian, shape_r_gt, shape_c_gt)) #x_maps = Input((shape_r_gt, shape_c_gt, nb_gaussian)) #Nueva version if version == 0: #NO USADO # m = Model(input=[x, x_maps], output=sam_vgg([x, x_maps])) print("Not Compiling SAM-VGG") #Nueva version # m.compile(RMSprop(lr=1e-4), loss=[kl_divergence, correlation_coefficient, nss]) elif version == 1: '''Hint of the problem: something is not the output of a keras layer. You should put it in a lambda layer When invoking the Model API, the value for outputs argument should be tensor(or list of tensors), in this case it is a list of list of tensors, hence there is a problem''' #m = Model(input=[x, x_maps], output=sam_resnet([x, x_maps])) #m = Model(inputs=[x, x_maps], outputs=sam_resnet([x, x_maps])) #New version m = ModelAux(inputs=[x, x_maps], outputs=sam_resnet([x, x_maps])) #Final version print("Compiling SAM-ResNet") m.compile(RMSprop(lr=1e-4), loss=[kl_divergence, correlation_coefficient, nss]) print("Compilado") else: raise NotImplementedError if phase == 'train': if nb_imgs_train % b_s != 0 or nb_imgs_val % b_s != 0: print("The number of training and validation images should be a multiple of the batch size. Please change your batch size in config.py accordingly.") exit() if version == 0: print("Training SAM-VGG") m.fit_generator(generator(b_s=b_s), nb_imgs_train, nb_epoch=nb_epoch, validation_data=generator(b_s=b_s, phase_gen='val'), nb_val_samples=nb_imgs_val, callbacks=[EarlyStopping(patience=3), ModelCheckpoint('weights.sam-vgg.{epoch:02d}-{val_loss:.4f}.pkl', save_best_only=True)]) elif version == 1: print("Training SAM-ResNet") m.fit_generator(generator(b_s=b_s), nb_imgs_train, nb_epoch=nb_epoch, validation_data=generator(b_s=b_s, phase_gen='val'), nb_val_samples=nb_imgs_val, callbacks=[EarlyStopping(patience=3), ModelCheckpoint('weights.sam-resnet.{epoch:02d}-{val_loss:.4f}.pkl', save_best_only=True)]) elif phase == "test": # Output Folder Path output_folder = 'predictions/' #if len(sys.argv) < 2: #Ejecucion por consola de windows # raise SyntaxError #imgs_test_path = sys.argv[2] if len(listaArg) < 2: #Ejecucion por codigo
imgs_test_path = listaArg[2] file_names = [f for f in os.listdir(imgs_test_path) if f.endswith(('.jpg', '.jpeg', '.png'))] file_names.sort() nb_imgs_test = len(file_names) if nb_imgs_test % b_s != 0: print("The number of test images should be a multiple of the batch size. Please change your batch size in config.py accordingly.") exit() if version == 0: #NO ACTIVA print("Not Loading SAM-VGG weights") #m.load_weights('weights/sam-vgg_salicon_weights.pkl') elif version == 1: # for i in range(len(m.layers)): # print('____________________________________________') # nro = i # print(i) # print(m.layers[nro]) # weight = m.layers[nro].get_weights() # if(len(weight) == 0): # print
raise SyntaxError
conditional_block
main.py
Aux(Model): #NUEVA CLASE CREADA @saving.allow_read_from_gcs def load_weights_new(self, filepath, skip_mismatch=False, reshape=False): """Loads all layer weights from a HDF5 save file. If `by_name` is False (default) weights are loaded based on the network's topology, meaning the architecture should be the same as when the weights were saved. Note that layers that don't have weights are not taken into account in the topological ordering, so adding or removing layers is fine as long as they don't have weights. If `by_name` is True, weights are loaded into layers only if they share the same name. This is useful for fine-tuning or transfer-learning models where some of the layers have changed. # Arguments filepath: String, path to the weights file to load. by_name: Boolean, whether to load weights by name or by topological order. skip_mismatch: Boolean, whether to skip loading of layers where there is a mismatch in the number of weights, or a mismatch in the shape of the weight (only valid when `by_name`=True). reshape: Reshape weights to fit the layer when the correct number of weight arrays is present but their shape does not match. # Raises ImportError: If h5py is not available. """ with h5py.File(filepath, mode='r') as f: if 'layer_names' not in f.attrs and 'model_weights' in f: f = f['model_weights'] #Nueva funcion desarrollada weights_proc.load_weights_from_hdf5_group_new(f, self.layers, reshape=reshape) if hasattr(f, 'close'): f.close() elif hasattr(f.file, 'close'): f.file.close() #class TensorNew(Layer): # '''We have the calls to add_weight(), and then call the super's build()''' # def __init__(self): # super().__init__() # self.tensor_shape = self. #EN CONSTRUCCION # # def numpy(self): # return tf.make_ndarray(self) ##////////////////////////////////////////////////////////////////// def generator(b_s, phase_gen='train'):
Y_fix = preprocess_fixmaps(fixs[counter:counter + b_s], shape_r_out, shape_c_out) yield [preprocess_images(images[counter:counter + b_s], shape_r, shape_c), gaussian], [Y, Y, Y_fix] counter = (counter + b_s) % len(images) def generator_test(b_s, imgs_test_path): images = [imgs_test_path + "\\" + f for f in os.listdir(imgs_test_path) if f.endswith(('.jpg', '.jpeg', '.png'))] images.sort() gaussian = np.zeros((b_s, nb_gaussian, shape_r_gt, shape_c_gt)) #counter = 0 #while True: # yield [preprocess_images(images[counter:counter + b_s], shape_r, shape_c), gaussian] # counter = (counter + b_s) % len(images) #Funciona para b_s = 1 NUEVA VERSION! counter = 0 while counter < len(images): print("Ejecutado generator_test para la imagen ", counter + 1) yield [preprocess_images(images[counter:counter + b_s], shape_r, shape_c), gaussian] counter = counter + 1 if __name__ == '__main__': #if len(sys.argv) == 1: #Ejecucion por consola de windows if len(listaArg) == 1: #Ejecucion por codigo raise NotImplementedError else: #phase = sys.argv[1] #Ejecucion por consola de windows phase = listaArg[1] #Ejecucion por codigo #x = Input((3, shape_r, shape_c)) x = Input((shape_r, shape_c, 3)) #Nueva version x_maps = Input((nb_gaussian, shape_r_gt, shape_c_gt)) #x_maps = Input((shape_r_gt, shape_c_gt, nb_gaussian)) #Nueva version if version == 0: #NO USADO # m = Model(input=[x, x_maps], output=sam_vgg([x, x_maps])) print("Not Compiling SAM-VGG") #Nueva version # m.compile(RMSprop(lr=1e-4), loss=[kl_divergence, correlation_coefficient, nss]) elif version == 1: '''Hint of the problem: something is not the output of a keras layer. You should put it in a lambda layer When invoking the Model API, the value for outputs argument should be tensor(or list of tensors), in this case it is a list of list of tensors, hence there is a problem''' #m = Model(input=[x, x_maps], output=sam_resnet([x, x_maps])) #m = Model(inputs=[x, x_maps], outputs=sam_resnet([x, x_maps])) #New version m = ModelAux(inputs=[x, x_maps], outputs=sam_resnet([x, x_maps])) #Final version print("Compiling SAM-ResNet") m.compile(RMSprop(lr=1e-4), loss=[kl_divergence, correlation_coefficient, nss]) print("Compilado") else: raise NotImplementedError if phase == 'train': if nb_imgs_train % b_s != 0 or nb_imgs_val % b_s != 0: print("The number of training and validation images should be a multiple of the batch size. 
Please change your batch size in config.py accordingly.") exit() if version == 0: print("Training SAM-VGG") m.fit_generator(generator(b_s=b_s), nb_imgs_train, nb_epoch=nb_epoch, validation_data=generator(b_s=b_s, phase_gen='val'), nb_val_samples=nb_imgs_val, callbacks=[EarlyStopping(patience=3), ModelCheckpoint('weights.sam-vgg.{epoch:02d}-{val_loss:.4f}.pkl', save_best_only=True)]) elif version == 1: print("Training SAM-ResNet") m.fit_generator(generator(b_s=b_s), nb_imgs_train, nb_epoch=nb_epoch, validation_data=generator(b_s=b_s, phase_gen='val'), nb_val_samples=nb_imgs_val, callbacks=[EarlyStopping(patience=3), ModelCheckpoint('weights.sam-resnet.{epoch:02d}-{val_loss:.4f}.pkl', save_best_only=True)]) elif phase == "test": # Output Folder Path output_folder = 'predictions/' #if len(sys.argv) < 2: #Ejecucion por consola de windows # raise SyntaxError #imgs_test_path = sys.argv[2] if len(listaArg) < 2: #Ejecucion por codigo raise SyntaxError imgs_test_path = listaArg[2] file_names = [f for f in os.listdir(imgs_test_path) if f.endswith(('.jpg', '.jpeg', '.png'))] file_names.sort() nb_imgs_test = len(file_names) if nb_imgs_test % b_s != 0: print("The number of test images should be a multiple of the batch size. Please change your batch size in config.py accordingly.") exit() if version == 0: #NO ACTIVA print("Not Loading SAM-VGG weights") #m.load_weights('weights/sam-vgg_salicon_weights.pkl') elif version == 1: # for i in range(len(m.layers)): # print('____________________________________________') # nro = i # print(i) # print(m.layers[nro]) # weight = m.layers[nro].get_weights() # if(len(weight) == 0): # print
if phase_gen == 'train':
    images = [imgs_train_path + f for f in os.listdir(imgs_train_path) if f.endswith(('.jpg', '.jpeg', '.png'))]
    maps = [maps_train_path + f for f in os.listdir(maps_train_path) if f.endswith(('.jpg', '.jpeg', '.png'))]
    fixs = [fixs_train_path + f for f in os.listdir(fixs_train_path) if f.endswith('.mat')]
elif phase_gen == 'val':
    images = [imgs_val_path + f for f in os.listdir(imgs_val_path) if f.endswith(('.jpg', '.jpeg', '.png'))]
    maps = [maps_val_path + f for f in os.listdir(maps_val_path) if f.endswith(('.jpg', '.jpeg', '.png'))]
    fixs = [fixs_val_path + f for f in os.listdir(fixs_val_path) if f.endswith('.mat')]
else:
    raise NotImplementedError
images.sort()
maps.sort()
fixs.sort()
gaussian = np.zeros((b_s, nb_gaussian, shape_r_gt, shape_c_gt))
counter = 0
while True:
    Y = preprocess_maps(maps[counter:counter+b_s], shape_r_out, shape_c_out)
identifier_body
imp.rs
pub fetched_at: Option<DateTime<Utc>>, // Link to use to download the above version pub downloadable_url: Url, } impl UpdateInfo { pub fn new(v: Version, url: Url) -> Self { UpdateInfo { version: v, fetched_at: None, downloadable_url: url, } } pub(super) fn version(&self) -> &Version { &self.version } pub(super) fn fetched_at(&self) -> Option<&DateTime<Utc>> { self.fetched_at.as_ref() } pub(super) fn set_fetched_at(&mut self, date_time: DateTime<Utc>) { self.fetched_at = Some(date_time); } } #[derive(Debug)] pub(super) struct MPSCState { // First successful call on rx.recv() will cache the results into this field recvd_payload: RefCell<Option<ReleasePayloadResult>>, // Receiver end of communication channel with worker thread rx: RefCell<Option<Receiver<ReleasePayloadResult>>>, } impl MPSCState { pub(super) fn new(rx: mpsc::Receiver<ReleasePayloadResult>) -> Self { MPSCState { recvd_payload: RefCell::new(None), rx: RefCell::new(Some(rx)), } } } impl<T> Updater<T> where T: Releaser + Send + 'static, { pub(super) fn load_or_new(r: T) -> Result<Self> { let _ = env_logger::try_init(); if let Ok(mut saved_state) = Self::load() { // Use the version that workflow reports through environment variable // This version takes priortiy over what we may have saved last time. let env_ver = env::workflow_version().and_then(|v| Version::parse(&v).ok()); if let Some(v) = env_ver { saved_state.current_version = v; } Ok(Updater { state: saved_state, releaser: RefCell::new(r), }) } else { let current_version = env::workflow_version() .map_or_else(|| Ok(Version::new(0, 0, 0)), |v| Version::parse(&v))?; let state = UpdaterState { current_version, last_check: Cell::new(None), avail_release: RefCell::new(None), worker_state: RefCell::new(None), update_interval: UPDATE_INTERVAL, }; let updater = Updater { state, releaser: RefCell::new(r), }; updater.save()?; Ok(updater) } } pub(super) fn last_check(&self) -> Option<DateTime<Utc>> { self.state.last_check.get() } pub(super) fn set_last_check(&self, t: DateTime<Utc>) { self.state.last_check.set(Some(t)); } pub(super) fn update_interval(&self) -> i64 { self.state.update_interval } pub(super) fn set_update_interval(&mut self, t: i64) { self.state.update_interval = t; } fn load() -> Result<UpdaterState> { let data_file_path = Self::build_data_fn()?; crate::Data::load_from_file(data_file_path) .ok_or_else(|| anyhow!("cannot load cached state of updater")) } // Save updater's state pub(super) fn save(&self) -> Result<()> { let data_file_path = Self::build_data_fn()?; crate::Data::save_to_file(&data_file_path, &self.state).map_err(|e| { let _r = remove_file(data_file_path); e }) } pub(super) fn start_releaser_worker( &self, tx: mpsc::Sender<ReleasePayloadResult>, p: PathBuf, ) -> Result<()> { use std::thread; let releaser = (*self.releaser.borrow()).clone(); thread::Builder::new().spawn(move || { debug!("other thread: starting in updater thread"); let talk_to_mother = || -> Result<()> { let (v, url) = releaser.latest_release()?; let mut info = UpdateInfo::new(v, url); info.set_fetched_at(Utc::now()); let payload = Some(info); Self::write_last_check_status(&p, &payload)?; tx.send(Ok(payload))?; Ok(()) }; let outcome = talk_to_mother(); debug!("other thread: finished checking releaser status"); if let Err(error) = outcome { tx.send(Err(error)) .expect("could not send error from thread"); } })?; Ok(()) } // write version of latest avail. 
release (if any) to a cache file pub(super) fn write_last_check_status( p: &Path, updater_info: &Option<UpdateInfo>, ) -> Result<()> { crate::Data::save_to_file(p, updater_info).map_err(|e| { let _r = remove_file(p); e }) } // read version of latest avail. release (if any) from a cache file pub(super) fn read_last_check_status(p: &Path) -> Result<Option<UpdateInfo>> { crate::Data::load_from_file(p).ok_or_else(|| anyhow!("no data in given path")) } pub(super) fn build_data_fn() -> Result<PathBuf> { let workflow_name = env::workflow_name() .unwrap_or_else(|| "YouForgotTo/フ:NameYourOwnWork}flowッ".to_string()) .chars() .map(|c| if c.is_ascii_alphanumeric() { c } else { '_' }) .collect::<String>(); env::workflow_cache() .ok_or_else(|| { anyhow!("missing env variable for cache dir. forgot to set workflow bundle id?") }) .and_then(|mut data_path| { env::workflow_uid() .ok_or_else(|| anyhow!("missing env variable for uid")) .map(|ref uid| { let filename = [uid, "-", workflow_name.as_str(), "-updater.json"].concat(); data_path.push(filename); data_path }) }) } pub(super) fn update_ready_async(&self, try_flag: bool) -> Result<bool> { self.state .worker_state .borrow() .as_ref() .ok_or_else(|| anyhow!("you need to use init() method first.")) .and_then(|mpsc| { if mpsc.recvd_payload.borrow().is_none() { // No payload received yet, try to talk to worker thread mpsc.rx .borrow() .as_ref() .ok_or_else(|| anyhow!("you need to use init() correctly!")) .and_then(|rx| { let rr = if try_flag { // don't block while trying to receive rx.try_recv().map_err(|e| anyhow!(e.to_string())) } else { // block while waiting to receive rx.recv().map_err(|e| anyhow!(e.to_string())) }; rr.and_then(|msg| { let msg_status = msg.map(|update_info| { // received good message, update cache for received payload *self.state.avail_release.borrow_mut() = update_info.clone(); // update last_check if received info is newer than last_check update_info.as_ref().map(|ui| { ui.fetched_at().map(|fetched_time| { if self.last_check().is_none() || self.last_check().as_ref().unwrap() < fetched_time { self.set_last_check(*fetched_time); } }) }); *mpsc.recvd_payload.borrow_mut() = Some(Ok(update_info)); }); // save state regardless of content of msg self.save()?; msg_status?; Ok(()) }) })?; } Ok(()) })?; Ok(self .state .avail_release .borrow() .as_ref() .map_or(false, |release| *self.current_version() < release.version)) } #[allow(dead_code)] #[deprecated(note = "update_ready_async is deprecated. use init()")] pub(super) fn _update_ready_async(&self) -> Result<bool> { let worker_state = self.state.worker_state.borrow(); assert!(worker_state.is_some(), "you need to use init first"); let mpsc = worker_state.as_ref().expect("no worker_state"); if mpsc.recvd_payload.borrow().is_none() { let rx_option = mpsc.rx.borrow(); let rx = rx_option.as_ref().unwrap(); let rr = rx.recv(); if rr.is_ok() { let msg = rr.as_ref().unwrap(); if msg.is_ok() { let update_info = msg.as_ref().unwrap(); *self.state.avail_release.borrow_mut() = update_info.clone(); *mpsc.recvd_payload.borrow_mut() = Some(Ok(update_info.clone())); } else { return Err(anyhow!(format!("{:?}", msg.as_ref().unwrap_err()))); } self.save()?; } else { eprintln!("{:?}", rr); return Err(anyhow!(format!("{:?}", rr))); }
} if let Some(ref updater_info) = *self.state.avail_release.borrow() { if *self.current_version() < updater_info.version {
random_line_split
imp.rs
.avail_release .borrow() .as_ref() .map(|ui| ui.version().clone()) } pub(super) fn
(&self) -> Ref<'_, Option<MPSCState>> { self.worker_state.borrow() } pub(super) fn borrow_worker_mut(&self) -> RefMut<'_, Option<MPSCState>> { self.worker_state.borrow_mut() } pub(super) fn download_url(&self) -> Option<Url> { self.avail_release .borrow() .as_ref() .map(|info| info.downloadable_url.clone()) } } #[derive(Debug, Serialize, Deserialize, Clone)] pub(super) struct UpdateInfo { // Latest version available from github or releaser pub version: Version, pub fetched_at: Option<DateTime<Utc>>, // Link to use to download the above version pub downloadable_url: Url, } impl UpdateInfo { pub fn new(v: Version, url: Url) -> Self { UpdateInfo { version: v, fetched_at: None, downloadable_url: url, } } pub(super) fn version(&self) -> &Version { &self.version } pub(super) fn fetched_at(&self) -> Option<&DateTime<Utc>> { self.fetched_at.as_ref() } pub(super) fn set_fetched_at(&mut self, date_time: DateTime<Utc>) { self.fetched_at = Some(date_time); } } #[derive(Debug)] pub(super) struct MPSCState { // First successful call on rx.recv() will cache the results into this field recvd_payload: RefCell<Option<ReleasePayloadResult>>, // Receiver end of communication channel with worker thread rx: RefCell<Option<Receiver<ReleasePayloadResult>>>, } impl MPSCState { pub(super) fn new(rx: mpsc::Receiver<ReleasePayloadResult>) -> Self { MPSCState { recvd_payload: RefCell::new(None), rx: RefCell::new(Some(rx)), } } } impl<T> Updater<T> where T: Releaser + Send + 'static, { pub(super) fn load_or_new(r: T) -> Result<Self> { let _ = env_logger::try_init(); if let Ok(mut saved_state) = Self::load() { // Use the version that workflow reports through environment variable // This version takes priortiy over what we may have saved last time. let env_ver = env::workflow_version().and_then(|v| Version::parse(&v).ok()); if let Some(v) = env_ver { saved_state.current_version = v; } Ok(Updater { state: saved_state, releaser: RefCell::new(r), }) } else { let current_version = env::workflow_version() .map_or_else(|| Ok(Version::new(0, 0, 0)), |v| Version::parse(&v))?; let state = UpdaterState { current_version, last_check: Cell::new(None), avail_release: RefCell::new(None), worker_state: RefCell::new(None), update_interval: UPDATE_INTERVAL, }; let updater = Updater { state, releaser: RefCell::new(r), }; updater.save()?; Ok(updater) } } pub(super) fn last_check(&self) -> Option<DateTime<Utc>> { self.state.last_check.get() } pub(super) fn set_last_check(&self, t: DateTime<Utc>) { self.state.last_check.set(Some(t)); } pub(super) fn update_interval(&self) -> i64 { self.state.update_interval } pub(super) fn set_update_interval(&mut self, t: i64) { self.state.update_interval = t; } fn load() -> Result<UpdaterState> { let data_file_path = Self::build_data_fn()?; crate::Data::load_from_file(data_file_path) .ok_or_else(|| anyhow!("cannot load cached state of updater")) } // Save updater's state pub(super) fn save(&self) -> Result<()> { let data_file_path = Self::build_data_fn()?; crate::Data::save_to_file(&data_file_path, &self.state).map_err(|e| { let _r = remove_file(data_file_path); e }) } pub(super) fn start_releaser_worker( &self, tx: mpsc::Sender<ReleasePayloadResult>, p: PathBuf, ) -> Result<()> { use std::thread; let releaser = (*self.releaser.borrow()).clone(); thread::Builder::new().spawn(move || { debug!("other thread: starting in updater thread"); let talk_to_mother = || -> Result<()> { let (v, url) = releaser.latest_release()?; let mut info = UpdateInfo::new(v, url); info.set_fetched_at(Utc::now()); let payload = 
Some(info); Self::write_last_check_status(&p, &payload)?; tx.send(Ok(payload))?; Ok(()) }; let outcome = talk_to_mother(); debug!("other thread: finished checking releaser status"); if let Err(error) = outcome { tx.send(Err(error)) .expect("could not send error from thread"); } })?; Ok(()) } // write version of latest avail. release (if any) to a cache file pub(super) fn write_last_check_status( p: &Path, updater_info: &Option<UpdateInfo>, ) -> Result<()> { crate::Data::save_to_file(p, updater_info).map_err(|e| { let _r = remove_file(p); e }) } // read version of latest avail. release (if any) from a cache file pub(super) fn read_last_check_status(p: &Path) -> Result<Option<UpdateInfo>> { crate::Data::load_from_file(p).ok_or_else(|| anyhow!("no data in given path")) } pub(super) fn build_data_fn() -> Result<PathBuf> { let workflow_name = env::workflow_name() .unwrap_or_else(|| "YouForgotTo/フ:NameYourOwnWork}flowッ".to_string()) .chars() .map(|c| if c.is_ascii_alphanumeric() { c } else { '_' }) .collect::<String>(); env::workflow_cache() .ok_or_else(|| { anyhow!("missing env variable for cache dir. forgot to set workflow bundle id?") }) .and_then(|mut data_path| { env::workflow_uid() .ok_or_else(|| anyhow!("missing env variable for uid")) .map(|ref uid| { let filename = [uid, "-", workflow_name.as_str(), "-updater.json"].concat(); data_path.push(filename); data_path }) }) } pub(super) fn update_ready_async(&self, try_flag: bool) -> Result<bool> { self.state .worker_state .borrow() .as_ref() .ok_or_else(|| anyhow!("you need to use init() method first.")) .and_then(|mpsc| { if mpsc.recvd_payload.borrow().is_none() { // No payload received yet, try to talk to worker thread mpsc.rx .borrow() .as_ref() .ok_or_else(|| anyhow!("you need to use init() correctly!")) .and_then(|rx| { let rr = if try_flag { // don't block while trying to receive rx.try_recv().map_err(|e| anyhow!(e.to_string())) } else { // block while waiting to receive rx.recv().map_err(|e| anyhow!(e.to_string())) }; rr.and_then(|msg| { let msg_status = msg.map(|update_info| { // received good message, update cache for received payload *self.state.avail_release.borrow_mut() = update_info.clone(); // update last_check if received info is newer than last_check update_info.as_ref().map(|ui| { ui.fetched_at().map(|fetched_time| { if self.last_check().is_none() || self.last_check().as_ref().unwrap() < fetched_time { self.set_last_check(*fetched_time); } }) }); *mpsc.recvd_payload.borrow_mut() = Some(Ok(update_info)); }); // save state regardless of content of msg self.save()?; msg_status?; Ok(()) }) })?; } Ok(()) })?; Ok(self .state .avail_release .borrow() .as_ref() .map_or(false, |release| *self.current_version() < release.version)) } #[allow(dead_code)] #[deprecated(note = "update_ready_async is deprecated. use init()")] pub(super) fn _update_ready_async(&self) -> Result<bool> { let worker_state = self.state.worker_state.borrow(); assert!(worker_state.is_some(), "you need to use init first"); let mpsc = worker_state.as_ref().expect("no worker_state"); if mpsc.recvd_payload.borrow().is_none() { let rx_option = mpsc.rx.borrow(); let rx
borrow_worker
identifier_name
imp.rs
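The imp.rs records above revolve around one design: start_releaser_worker spawns a thread that fetches the latest release and sends the result (or the error) back over an mpsc channel, and update_ready_async either blocks on recv or polls with try_recv, caching the first payload in MPSCState. Sticking to the Python used elsewhere in this dump, here is a rough, hypothetical analogue of that producer/consumer shape built on threading and queue; it is a sketch of the idea, not the actual Rust API:

# Rough Python analogue of start_releaser_worker / update_ready_async:
# one worker thread pushes a single result onto a queue; the caller either
# blocks or polls, and caches the first payload it receives.
import queue
import threading

class Updater:
    def __init__(self, fetch_latest_release):
        self._rx = queue.Queue()
        self._cached = None
        self._fetch = fetch_latest_release        # e.g. lambda: ("1.2.3", "https://example/dl")

    def start_releaser_worker(self):
        def work():
            try:
                self._rx.put(("ok", self._fetch()))
            except Exception as err:              # send the error instead of crashing the worker
                self._rx.put(("err", err))
        threading.Thread(target=work, daemon=True).start()

    def update_ready(self, try_flag=False):
        if self._cached is None:
            try:
                # try_flag mirrors rx.try_recv() (poll) vs rx.recv() (block) in imp.rs
                self._cached = self._rx.get(block=not try_flag)
            except queue.Empty:
                return False                      # worker has not answered yet
        status, payload = self._cached
        if status == "err":
            raise payload
        return payload is not None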
.avail_release .borrow() .as_ref() .map(|ui| ui.version().clone()) } pub(super) fn borrow_worker(&self) -> Ref<'_, Option<MPSCState>> { self.worker_state.borrow() } pub(super) fn borrow_worker_mut(&self) -> RefMut<'_, Option<MPSCState>> { self.worker_state.borrow_mut() } pub(super) fn download_url(&self) -> Option<Url> { self.avail_release .borrow() .as_ref() .map(|info| info.downloadable_url.clone()) } } #[derive(Debug, Serialize, Deserialize, Clone)] pub(super) struct UpdateInfo { // Latest version available from github or releaser pub version: Version, pub fetched_at: Option<DateTime<Utc>>, // Link to use to download the above version pub downloadable_url: Url, } impl UpdateInfo { pub fn new(v: Version, url: Url) -> Self { UpdateInfo { version: v, fetched_at: None, downloadable_url: url, } } pub(super) fn version(&self) -> &Version { &self.version } pub(super) fn fetched_at(&self) -> Option<&DateTime<Utc>> { self.fetched_at.as_ref() } pub(super) fn set_fetched_at(&mut self, date_time: DateTime<Utc>) { self.fetched_at = Some(date_time); } } #[derive(Debug)] pub(super) struct MPSCState { // First successful call on rx.recv() will cache the results into this field recvd_payload: RefCell<Option<ReleasePayloadResult>>, // Receiver end of communication channel with worker thread rx: RefCell<Option<Receiver<ReleasePayloadResult>>>, } impl MPSCState { pub(super) fn new(rx: mpsc::Receiver<ReleasePayloadResult>) -> Self { MPSCState { recvd_payload: RefCell::new(None), rx: RefCell::new(Some(rx)), } } } impl<T> Updater<T> where T: Releaser + Send + 'static, { pub(super) fn load_or_new(r: T) -> Result<Self> { let _ = env_logger::try_init(); if let Ok(mut saved_state) = Self::load() { // Use the version that workflow reports through environment variable // This version takes priortiy over what we may have saved last time. let env_ver = env::workflow_version().and_then(|v| Version::parse(&v).ok()); if let Some(v) = env_ver { saved_state.current_version = v; } Ok(Updater { state: saved_state, releaser: RefCell::new(r), }) } else { let current_version = env::workflow_version() .map_or_else(|| Ok(Version::new(0, 0, 0)), |v| Version::parse(&v))?; let state = UpdaterState { current_version, last_check: Cell::new(None), avail_release: RefCell::new(None), worker_state: RefCell::new(None), update_interval: UPDATE_INTERVAL, }; let updater = Updater { state, releaser: RefCell::new(r), }; updater.save()?; Ok(updater) } } pub(super) fn last_check(&self) -> Option<DateTime<Utc>> { self.state.last_check.get() } pub(super) fn set_last_check(&self, t: DateTime<Utc>) { self.state.last_check.set(Some(t)); } pub(super) fn update_interval(&self) -> i64 { self.state.update_interval } pub(super) fn set_update_interval(&mut self, t: i64)
fn load() -> Result<UpdaterState> { let data_file_path = Self::build_data_fn()?; crate::Data::load_from_file(data_file_path) .ok_or_else(|| anyhow!("cannot load cached state of updater")) } // Save updater's state pub(super) fn save(&self) -> Result<()> { let data_file_path = Self::build_data_fn()?; crate::Data::save_to_file(&data_file_path, &self.state).map_err(|e| { let _r = remove_file(data_file_path); e }) } pub(super) fn start_releaser_worker( &self, tx: mpsc::Sender<ReleasePayloadResult>, p: PathBuf, ) -> Result<()> { use std::thread; let releaser = (*self.releaser.borrow()).clone(); thread::Builder::new().spawn(move || { debug!("other thread: starting in updater thread"); let talk_to_mother = || -> Result<()> { let (v, url) = releaser.latest_release()?; let mut info = UpdateInfo::new(v, url); info.set_fetched_at(Utc::now()); let payload = Some(info); Self::write_last_check_status(&p, &payload)?; tx.send(Ok(payload))?; Ok(()) }; let outcome = talk_to_mother(); debug!("other thread: finished checking releaser status"); if let Err(error) = outcome { tx.send(Err(error)) .expect("could not send error from thread"); } })?; Ok(()) } // write version of latest avail. release (if any) to a cache file pub(super) fn write_last_check_status( p: &Path, updater_info: &Option<UpdateInfo>, ) -> Result<()> { crate::Data::save_to_file(p, updater_info).map_err(|e| { let _r = remove_file(p); e }) } // read version of latest avail. release (if any) from a cache file pub(super) fn read_last_check_status(p: &Path) -> Result<Option<UpdateInfo>> { crate::Data::load_from_file(p).ok_or_else(|| anyhow!("no data in given path")) } pub(super) fn build_data_fn() -> Result<PathBuf> { let workflow_name = env::workflow_name() .unwrap_or_else(|| "YouForgotTo/フ:NameYourOwnWork}flowッ".to_string()) .chars() .map(|c| if c.is_ascii_alphanumeric() { c } else { '_' }) .collect::<String>(); env::workflow_cache() .ok_or_else(|| { anyhow!("missing env variable for cache dir. 
forgot to set workflow bundle id?") }) .and_then(|mut data_path| { env::workflow_uid() .ok_or_else(|| anyhow!("missing env variable for uid")) .map(|ref uid| { let filename = [uid, "-", workflow_name.as_str(), "-updater.json"].concat(); data_path.push(filename); data_path }) }) } pub(super) fn update_ready_async(&self, try_flag: bool) -> Result<bool> { self.state .worker_state .borrow() .as_ref() .ok_or_else(|| anyhow!("you need to use init() method first.")) .and_then(|mpsc| { if mpsc.recvd_payload.borrow().is_none() { // No payload received yet, try to talk to worker thread mpsc.rx .borrow() .as_ref() .ok_or_else(|| anyhow!("you need to use init() correctly!")) .and_then(|rx| { let rr = if try_flag { // don't block while trying to receive rx.try_recv().map_err(|e| anyhow!(e.to_string())) } else { // block while waiting to receive rx.recv().map_err(|e| anyhow!(e.to_string())) }; rr.and_then(|msg| { let msg_status = msg.map(|update_info| { // received good message, update cache for received payload *self.state.avail_release.borrow_mut() = update_info.clone(); // update last_check if received info is newer than last_check update_info.as_ref().map(|ui| { ui.fetched_at().map(|fetched_time| { if self.last_check().is_none() || self.last_check().as_ref().unwrap() < fetched_time { self.set_last_check(*fetched_time); } }) }); *mpsc.recvd_payload.borrow_mut() = Some(Ok(update_info)); }); // save state regardless of content of msg self.save()?; msg_status?; Ok(()) }) })?; } Ok(()) })?; Ok(self .state .avail_release .borrow() .as_ref() .map_or(false, |release| *self.current_version() < release.version)) } #[allow(dead_code)] #[deprecated(note = "update_ready_async is deprecated. use init()")] pub(super) fn _update_ready_async(&self) -> Result<bool> { let worker_state = self.state.worker_state.borrow(); assert!(worker_state.is_some(), "you need to use init first"); let mpsc = worker_state.as_ref().expect("no worker_state"); if mpsc.recvd_payload.borrow().is_none() { let rx_option = mpsc.rx.borrow(); let
{ self.state.update_interval = t; }
identifier_body
imp.rs
.avail_release .borrow() .as_ref() .map(|ui| ui.version().clone()) } pub(super) fn borrow_worker(&self) -> Ref<'_, Option<MPSCState>> { self.worker_state.borrow() } pub(super) fn borrow_worker_mut(&self) -> RefMut<'_, Option<MPSCState>> { self.worker_state.borrow_mut() } pub(super) fn download_url(&self) -> Option<Url> { self.avail_release .borrow() .as_ref() .map(|info| info.downloadable_url.clone()) } } #[derive(Debug, Serialize, Deserialize, Clone)] pub(super) struct UpdateInfo { // Latest version available from github or releaser pub version: Version, pub fetched_at: Option<DateTime<Utc>>, // Link to use to download the above version pub downloadable_url: Url, } impl UpdateInfo { pub fn new(v: Version, url: Url) -> Self { UpdateInfo { version: v, fetched_at: None, downloadable_url: url, } } pub(super) fn version(&self) -> &Version { &self.version } pub(super) fn fetched_at(&self) -> Option<&DateTime<Utc>> { self.fetched_at.as_ref() } pub(super) fn set_fetched_at(&mut self, date_time: DateTime<Utc>) { self.fetched_at = Some(date_time); } } #[derive(Debug)] pub(super) struct MPSCState { // First successful call on rx.recv() will cache the results into this field recvd_payload: RefCell<Option<ReleasePayloadResult>>, // Receiver end of communication channel with worker thread rx: RefCell<Option<Receiver<ReleasePayloadResult>>>, } impl MPSCState { pub(super) fn new(rx: mpsc::Receiver<ReleasePayloadResult>) -> Self { MPSCState { recvd_payload: RefCell::new(None), rx: RefCell::new(Some(rx)), } } } impl<T> Updater<T> where T: Releaser + Send + 'static, { pub(super) fn load_or_new(r: T) -> Result<Self> { let _ = env_logger::try_init(); if let Ok(mut saved_state) = Self::load() { // Use the version that workflow reports through environment variable // This version takes priortiy over what we may have saved last time. let env_ver = env::workflow_version().and_then(|v| Version::parse(&v).ok()); if let Some(v) = env_ver { saved_state.current_version = v; } Ok(Updater { state: saved_state, releaser: RefCell::new(r), }) } else
} pub(super) fn last_check(&self) -> Option<DateTime<Utc>> { self.state.last_check.get() } pub(super) fn set_last_check(&self, t: DateTime<Utc>) { self.state.last_check.set(Some(t)); } pub(super) fn update_interval(&self) -> i64 { self.state.update_interval } pub(super) fn set_update_interval(&mut self, t: i64) { self.state.update_interval = t; } fn load() -> Result<UpdaterState> { let data_file_path = Self::build_data_fn()?; crate::Data::load_from_file(data_file_path) .ok_or_else(|| anyhow!("cannot load cached state of updater")) } // Save updater's state pub(super) fn save(&self) -> Result<()> { let data_file_path = Self::build_data_fn()?; crate::Data::save_to_file(&data_file_path, &self.state).map_err(|e| { let _r = remove_file(data_file_path); e }) } pub(super) fn start_releaser_worker( &self, tx: mpsc::Sender<ReleasePayloadResult>, p: PathBuf, ) -> Result<()> { use std::thread; let releaser = (*self.releaser.borrow()).clone(); thread::Builder::new().spawn(move || { debug!("other thread: starting in updater thread"); let talk_to_mother = || -> Result<()> { let (v, url) = releaser.latest_release()?; let mut info = UpdateInfo::new(v, url); info.set_fetched_at(Utc::now()); let payload = Some(info); Self::write_last_check_status(&p, &payload)?; tx.send(Ok(payload))?; Ok(()) }; let outcome = talk_to_mother(); debug!("other thread: finished checking releaser status"); if let Err(error) = outcome { tx.send(Err(error)) .expect("could not send error from thread"); } })?; Ok(()) } // write version of latest avail. release (if any) to a cache file pub(super) fn write_last_check_status( p: &Path, updater_info: &Option<UpdateInfo>, ) -> Result<()> { crate::Data::save_to_file(p, updater_info).map_err(|e| { let _r = remove_file(p); e }) } // read version of latest avail. release (if any) from a cache file pub(super) fn read_last_check_status(p: &Path) -> Result<Option<UpdateInfo>> { crate::Data::load_from_file(p).ok_or_else(|| anyhow!("no data in given path")) } pub(super) fn build_data_fn() -> Result<PathBuf> { let workflow_name = env::workflow_name() .unwrap_or_else(|| "YouForgotTo/フ:NameYourOwnWork}flowッ".to_string()) .chars() .map(|c| if c.is_ascii_alphanumeric() { c } else { '_' }) .collect::<String>(); env::workflow_cache() .ok_or_else(|| { anyhow!("missing env variable for cache dir. 
forgot to set workflow bundle id?") }) .and_then(|mut data_path| { env::workflow_uid() .ok_or_else(|| anyhow!("missing env variable for uid")) .map(|ref uid| { let filename = [uid, "-", workflow_name.as_str(), "-updater.json"].concat(); data_path.push(filename); data_path }) }) } pub(super) fn update_ready_async(&self, try_flag: bool) -> Result<bool> { self.state .worker_state .borrow() .as_ref() .ok_or_else(|| anyhow!("you need to use init() method first.")) .and_then(|mpsc| { if mpsc.recvd_payload.borrow().is_none() { // No payload received yet, try to talk to worker thread mpsc.rx .borrow() .as_ref() .ok_or_else(|| anyhow!("you need to use init() correctly!")) .and_then(|rx| { let rr = if try_flag { // don't block while trying to receive rx.try_recv().map_err(|e| anyhow!(e.to_string())) } else { // block while waiting to receive rx.recv().map_err(|e| anyhow!(e.to_string())) }; rr.and_then(|msg| { let msg_status = msg.map(|update_info| { // received good message, update cache for received payload *self.state.avail_release.borrow_mut() = update_info.clone(); // update last_check if received info is newer than last_check update_info.as_ref().map(|ui| { ui.fetched_at().map(|fetched_time| { if self.last_check().is_none() || self.last_check().as_ref().unwrap() < fetched_time { self.set_last_check(*fetched_time); } }) }); *mpsc.recvd_payload.borrow_mut() = Some(Ok(update_info)); }); // save state regardless of content of msg self.save()?; msg_status?; Ok(()) }) })?; } Ok(()) })?; Ok(self .state .avail_release .borrow() .as_ref() .map_or(false, |release| *self.current_version() < release.version)) } #[allow(dead_code)] #[deprecated(note = "update_ready_async is deprecated. use init()")] pub(super) fn _update_ready_async(&self) -> Result<bool> { let worker_state = self.state.worker_state.borrow(); assert!(worker_state.is_some(), "you need to use init first"); let mpsc = worker_state.as_ref().expect("no worker_state"); if mpsc.recvd_payload.borrow().is_none() { let rx_option = mpsc.rx.borrow(); let
{
    let current_version = env::workflow_version()
        .map_or_else(|| Ok(Version::new(0, 0, 0)), |v| Version::parse(&v))?;
    let state = UpdaterState {
        current_version,
        last_check: Cell::new(None),
        avail_release: RefCell::new(None),
        worker_state: RefCell::new(None),
        update_interval: UPDATE_INTERVAL,
    };
    let updater = Updater {
        state,
        releaser: RefCell::new(r),
    };
    updater.save()?;
    Ok(updater)
}
conditional_block
pbms.rs
product bundle")?; fetch_data_for_product_bundle_v1(&product_bundle, &url, local_repo_dir, auth_flow, ui).await } /// Helper for `get_product_data()`, see docs there. pub async fn fetch_data_for_product_bundle_v1<I>( product_bundle: &sdk_metadata::ProductBundleV1, product_url: &url::Url, local_repo_dir: &std::path::Path, auth_flow: &AuthFlowChoice, ui: &I, ) -> Result<()> where I: structured_ui::Interface + Sync, { // Handy debugging switch to disable images download. let temp_dir = TempDir::new_in(&local_repo_dir)?; let temp_path = temp_dir.path(); if true { let start = std::time::Instant::now(); tracing::info!( "Getting product data for {:?} to {:?}", product_bundle.name, local_repo_dir ); let local_dir = temp_path.join("images"); async_fs::create_dir_all(&local_dir).await.context("creating directory")?; for image in &product_bundle.images { tracing::debug!("image {:?}", image); let base_url = make_remote_url(product_url, &image.base_uri).context("image.base_uri")?; if !exists_in_gcs(&base_url.as_str(), auth_flow, ui).await? { tracing::warn!("The base_uri does not exist: {}", base_url); } fetch_by_format( &image.format, &base_url, &local_dir, auth_flow, &|d, f| { let mut progress = structured_ui::Progress::builder(); progress.title(&product_bundle.name); progress.entry("Image data", /*at=*/ 1, /*of=*/ 2, "steps"); progress.entry(&d.name, d.at, d.of, "files"); progress.entry(&f.name, f.at, f.of, "bytes"); ui.present(&structured_ui::Presentation::Progress(progress))?; Ok(ProgressResponse::Continue) }, ui, ) .await .with_context(|| format!("fetching images for {}.", product_bundle.name))?; } tracing::debug!("Total fetch images runtime {} seconds.", start.elapsed().as_secs_f32()); } // Handy debugging switch to disable packages download. if true { let start = std::time::Instant::now(); let local_dir = temp_path.join("packages"); async_fs::create_dir_all(&local_dir).await.context("creating directory")?; tracing::info!( "Getting package data for {:?}, local_dir {:?}", product_bundle.name, local_dir ); fetch_package_repository_from_mirrors( product_url, &local_dir, &product_bundle.packages, auth_flow, &|d, f| { let mut progress = structured_ui::Progress::builder(); progress.title(&product_bundle.name); progress.entry("Package data", /*at=*/ 2, /*at=*/ 2, "steps"); progress.entry(&d.name, d.at, d.of, "files"); progress.entry(&f.name, f.at, f.of, "bytes"); ui.present(&structured_ui::Presentation::Progress(progress))?; Ok(ProgressResponse::Continue) }, ui, ) .await .with_context(|| { format!( "fetch_package_repository_from_mirrors {:?}, local_dir {:?}", product_url, local_dir ) })?; tracing::debug!("Total fetch packages runtime {} seconds.", start.elapsed().as_secs_f32()); } let final_name = local_repo_dir.join(&product_bundle.name); tracing::info!("Download of product data for {:?} is complete.", product_bundle.name); tracing::info!("Renaming temporary directory to {}", final_name.display()); fs::rename(temp_path, final_name).expect("Renaming temp directory failed."); tracing::info!("Data written to \"{}\".", local_repo_dir.display()); Ok(()) } /// Generate a (likely) unique name for the URL. /// /// URLs don't always make good file paths. 
pub(crate) fn pb_dir_name(gcs_url: &url::Url) -> String { let mut gcs_url = gcs_url.to_owned(); gcs_url.set_fragment(None); use std::collections::hash_map::DefaultHasher; use std::hash::Hash; use std::hash::Hasher; let mut s = DefaultHasher::new(); gcs_url.as_str().hash(&mut s); let out = s.finish(); tracing::debug!("pb_dir_name {:?}, hash {:?}", gcs_url, out); format!("{}", out) } /// Download and expand data. /// /// For a directory, all files in the directory are downloaded. /// For a .tgz file, the file is downloaded and expanded. async fn fetch_by_format<F, I>( format: &str, uri: &url::Url, local_dir: &Path, auth_flow: &AuthFlowChoice, progress: &F, ui: &I, ) -> Result<()> where F: Fn(DirectoryProgress<'_>, FileProgress<'_>) -> ProgressResult, I: structured_ui::Interface + Sync, { tracing::debug!("fetch_by_format"); match format { "files" | "tgz" => fetch_bundle_uri(uri, &local_dir, auth_flow, progress, ui).await, _ => // The schema currently defines only "files" or "tgz" (see RFC-100). // This error could be a typo in the product bundle or a new image // format has been added and this code needs an update. { bail!( "Unexpected image format ({:?}) in product bundle. \ Supported formats are \"files\" and \"tgz\". \ Please report as a bug.", format, ) } } } /// Download data from any of the supported schemes listed in RFC-100, Product /// Bundle, "bundle_uri". /// /// Currently: "pattern": "^(?:http|https|gs|file):\/\/" pub(crate) async fn fetch_bundle_uri<F, I>( product_url: &url::Url, local_dir: &Path, auth_flow: &AuthFlowChoice, progress: &F, ui: &I, ) -> Result<()> where F: Fn(DirectoryProgress<'_>, FileProgress<'_>) -> ProgressResult, I: structured_ui::Interface + Sync, { tracing::debug!("fetch_bundle_uri"); if product_url.scheme() == GS_SCHEME { fetch_from_gcs(product_url.as_str(), local_dir, auth_flow, progress, ui) .await .context("Downloading from GCS.")?; } else if product_url.scheme() == "http" || product_url.scheme() == "https" { fetch_from_web(product_url, local_dir, progress, ui) .await .context("fetching from http(s)")?; } else if let Some(_) = &path_from_file_url(product_url) { // Since the file is already local, no fetch is necessary. tracing::debug!("Found local file path {:?}", product_url); } else { bail!("Unexpected URI scheme in ({:?})", product_url); } Ok(()) } async fn fetch_from_web<F, I>( product_uri: &url::Url, local_dir: &Path, progress: &F, _ui: &I, ) -> Result<()> where F: Fn(DirectoryProgress<'_>, FileProgress<'_>) -> ProgressResult, I: structured_ui::Interface + Sync, { tracing::debug!("fetch_from_web"); let name = if let Some((_, name)) = product_uri.path().rsplit_once('/') { name } else { unimplemented!() }; if name.is_empty() { unimplemented!("downloading a directory from a web server is not implemented"); } let res = fuchsia_hyper::new_client() .get(hyper::Uri::from_maybe_shared(product_uri.to_string())?) .await .with_context(|| format!("Requesting {}", product_uri))?; match res.status() { StatusCode::OK => {} StatusCode::NOT_FOUND => { bail!("{} not found", product_uri); } status => { bail!("Unexpected HTTP status downloading {}: {}", product_uri, status); } } let mut at: u64 = 0; let length = if res.headers().contains_key(CONTENT_LENGTH) { res.headers() .get(CONTENT_LENGTH) .context("getting content length")? .to_str()? .parse::<u64>() .context("parsing content length")? 
} else { 0 }; std::fs::create_dir_all(local_dir) .with_context(|| format!("Creating {}", local_dir.display()))?; let path = local_dir.join(name); let mut file = File::create(&path).await.with_context(|| format!("Creating {}", path.display()))?; let mut stream = res.into_body(); let mut of = length; // Throttle the progress UI updates to avoid burning CPU on changes
// the user will have trouble seeing anyway. Without throttling, // around 20% of the execution time can be spent updating the // progress UI. The throttle makes the overhead negligible.
random_line_split
pbms.rs
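The pbms.rs record above includes pb_dir_name, whose doc comment captures the reasoning: URLs make poor file paths, so the fragment is stripped and the remaining URL is hashed into a short, stable directory name. A hypothetical Python sketch of the same idea (the hash function and output format are illustrative and do not reproduce what the Rust DefaultHasher emits):

# Illustrative sketch of pb_dir_name's idea: drop the URL fragment, then hash
# what is left into a short, filesystem-safe directory name.
import hashlib
from urllib.parse import urldefrag

def pb_dir_name(url: str) -> str:
    base, _fragment = urldefrag(url)   # "gs://bucket/pb#name" -> "gs://bucket/pb"
    return hashlib.sha256(base.encode()).hexdigest()[:16]

print(pb_dir_name("gs://fuchsia/product_bundles.json#workstation"))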
add_dir: &str, dir: bool, sdk_root: &Path, ) -> Result<PathBuf> { assert!(!product_url.fragment().is_none()); if let Some(path) = &path_from_file_url(product_url) { if dir { // TODO(fxbug.dev/98009): Unify the file layout between local and remote // product bundles to avoid this hack. if path.starts_with(sdk_root) { Ok(sdk_root.to_path_buf()) } else { Ok(path.parent().expect("parent of file path").to_path_buf()) } } else { Ok(path.to_path_buf()) } } else { let url = url_sans_fragment(&product_url)?; Ok(get_product_dir(&url).await?.join(add_dir)) } } /// Retrieve the storage directory path from the config. pub async fn get_storage_dir() -> Result<PathBuf> { let storage_path: PathBuf = ffx_config::get(CONFIG_STORAGE_PATH).await.context("getting CONFIG_STORAGE_PATH")?; Ok(storage_path) } /// Retrieve the product directory path from the config. /// /// This is the storage path plus a hash of the `product_url` provided. pub async fn get_product_dir(product_url: &url::Url) -> Result<PathBuf>
/// Separate the URL on the last "#" character. /// /// If no "#" is found, use the whole input as the url. /// /// "file://foo#bar" -> "file://foo" /// "file://foo" -> "file://foo" pub(crate) fn url_sans_fragment(product_url: &url::Url) -> Result<url::Url> { let mut product_url = product_url.to_owned(); product_url.set_fragment(None); Ok(product_url) } /// Helper for `get_product_data()`, see docs there. pub(crate) async fn get_product_data_from_gcs<I>( product_url: &url::Url, local_repo_dir: &std::path::Path, auth_flow: &AuthFlowChoice, ui: &I, ) -> Result<()> where I: structured_ui::Interface + Sync, { tracing::debug!("get_product_data_from_gcs {:?} to {:?}", product_url, local_repo_dir); assert_eq!(product_url.scheme(), GS_SCHEME); let product_name = product_url.fragment().expect("URL with trailing product_name fragment."); let url = url_sans_fragment(product_url)?; fetch_product_metadata( &url, local_repo_dir, auth_flow, &mut |_d, _f| Ok(ProgressResponse::Continue), ui, ) .await .context("fetching metadata")?; let file_path = local_repo_dir.join("product_bundles.json"); if !file_path.is_file() { bail!("product_bundles.json not found {:?}.", file_path); } let mut entries = Entries::new(); entries.add_from_path(&file_path).context("adding entries from gcs")?; let product_bundle = find_product_bundle(&entries, &Some(product_name.to_string())) .context("finding product bundle")?; fetch_data_for_product_bundle_v1(&product_bundle, &url, local_repo_dir, auth_flow, ui).await } /// Helper for `get_product_data()`, see docs there. pub async fn fetch_data_for_product_bundle_v1<I>( product_bundle: &sdk_metadata::ProductBundleV1, product_url: &url::Url, local_repo_dir: &std::path::Path, auth_flow: &AuthFlowChoice, ui: &I, ) -> Result<()> where I: structured_ui::Interface + Sync, { // Handy debugging switch to disable images download. let temp_dir = TempDir::new_in(&local_repo_dir)?; let temp_path = temp_dir.path(); if true { let start = std::time::Instant::now(); tracing::info!( "Getting product data for {:?} to {:?}", product_bundle.name, local_repo_dir ); let local_dir = temp_path.join("images"); async_fs::create_dir_all(&local_dir).await.context("creating directory")?; for image in &product_bundle.images { tracing::debug!("image {:?}", image); let base_url = make_remote_url(product_url, &image.base_uri).context("image.base_uri")?; if !exists_in_gcs(&base_url.as_str(), auth_flow, ui).await? { tracing::warn!("The base_uri does not exist: {}", base_url); } fetch_by_format( &image.format, &base_url, &local_dir, auth_flow, &|d, f| { let mut progress = structured_ui::Progress::builder(); progress.title(&product_bundle.name); progress.entry("Image data", /*at=*/ 1, /*of=*/ 2, "steps"); progress.entry(&d.name, d.at, d.of, "files"); progress.entry(&f.name, f.at, f.of, "bytes"); ui.present(&structured_ui::Presentation::Progress(progress))?; Ok(ProgressResponse::Continue) }, ui, ) .await .with_context(|| format!("fetching images for {}.", product_bundle.name))?; } tracing::debug!("Total fetch images runtime {} seconds.", start.elapsed().as_secs_f32()); } // Handy debugging switch to disable packages download. 
if true { let start = std::time::Instant::now(); let local_dir = temp_path.join("packages"); async_fs::create_dir_all(&local_dir).await.context("creating directory")?; tracing::info!( "Getting package data for {:?}, local_dir {:?}", product_bundle.name, local_dir ); fetch_package_repository_from_mirrors( product_url, &local_dir, &product_bundle.packages, auth_flow, &|d, f| { let mut progress = structured_ui::Progress::builder(); progress.title(&product_bundle.name); progress.entry("Package data", /*at=*/ 2, /*at=*/ 2, "steps"); progress.entry(&d.name, d.at, d.of, "files"); progress.entry(&f.name, f.at, f.of, "bytes"); ui.present(&structured_ui::Presentation::Progress(progress))?; Ok(ProgressResponse::Continue) }, ui, ) .await .with_context(|| { format!( "fetch_package_repository_from_mirrors {:?}, local_dir {:?}", product_url, local_dir ) })?; tracing::debug!("Total fetch packages runtime {} seconds.", start.elapsed().as_secs_f32()); } let final_name = local_repo_dir.join(&product_bundle.name); tracing::info!("Download of product data for {:?} is complete.", product_bundle.name); tracing::info!("Renaming temporary directory to {}", final_name.display()); fs::rename(temp_path, final_name).expect("Renaming temp directory failed."); tracing::info!("Data written to \"{}\".", local_repo_dir.display()); Ok(()) } /// Generate a (likely) unique name for the URL. /// /// URLs don't always make good file paths. pub(crate) fn pb_dir_name(gcs_url: &url::Url) -> String { let mut gcs_url = gcs_url.to_owned(); gcs_url.set_fragment(None); use std::collections::hash_map::DefaultHasher; use std::hash::Hash; use std::hash::Hasher; let mut s = DefaultHasher::new(); gcs_url.as_str().hash(&mut s); let out = s.finish(); tracing::debug!("pb_dir_name {:?}, hash {:?}", gcs_url, out); format!("{}", out) } /// Download and expand data. /// /// For a directory, all files in the directory are downloaded. /// For a .tgz file, the file is downloaded and expanded. async fn fetch_by_format<F, I>( format: &str, uri: &url::Url, local_dir: &Path, auth_flow: &AuthFlowChoice, progress: &F, ui: &I, ) -> Result<()> where F: Fn(DirectoryProgress<'_>, FileProgress<'_>) -> ProgressResult, I: structured_ui::Interface + Sync, { tracing::debug!("fetch_by_format"); match format { "files" | "tgz" => fetch_bundle_uri(uri, &local_dir, auth_flow, progress, ui).await, _ => // The schema currently defines only "files" or "tgz" (see RFC-100). // This error could be a typo in the product bundle or a new image // format has been added and this code needs an update. { bail!( "Unexpected image format ({:?}) in product bundle. \ Supported formats are \"files\" and \"tgz\". \ Please report as a bug.", format, ) } } } /// Download data from any of the supported schemes listed in RFC-100, Product /// Bundle, "bundle_uri". /// /// Currently: "pattern": "^(?:http|https|gs|file):\/\/" pub(crate) async fn fetch_bundle_uri<F, I>( product_url: &url::Url, local_dir:
{ Ok(get_storage_dir().await?.join(pb_dir_name(product_url))) }
identifier_body
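
The `pb_dir_name` / `get_product_dir` pair in the row above derives a local directory from a product-bundle URL: strip the fragment, hash the remaining URL text, and join the hash onto the storage directory. A self-contained sketch of the same idea follows; it operates on plain strings instead of `url::Url` for brevity, which is an assumption made only for this example.

```rust
// Standalone sketch of pb_dir_name / get_product_dir, on plain strings.
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
use std::path::{Path, PathBuf};

/// Hash the fragment-less URL text, as pb_dir_name does.
fn pb_dir_name(url: &str) -> String {
    let sans_fragment = url.split('#').next().unwrap_or(url);
    let mut hasher = DefaultHasher::new();
    sans_fragment.hash(&mut hasher);
    format!("{}", hasher.finish())
}

/// Storage directory plus the per-URL hash, mirroring get_product_dir's body.
fn product_dir(storage_dir: &Path, url: &str) -> PathBuf {
    storage_dir.join(pb_dir_name(url))
}

fn main() {
    // Two spellings of the same bundle URL (with and without a fragment)
    // land in the same directory.
    let a = product_dir(Path::new("/tmp/pbms"), "gs://bucket/bundle#workstation");
    let b = product_dir(Path::new("/tmp/pbms"), "gs://bucket/bundle");
    assert_eq!(a, b);
    println!("{}", a.display());
}
```

Note that `DefaultHasher` output is not guaranteed to be stable across Rust releases, so directories named this way are best treated as a cache rather than a durable layout.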
pbms.rs
paths. pub(crate) fn pb_dir_name(gcs_url: &url::Url) -> String { let mut gcs_url = gcs_url.to_owned(); gcs_url.set_fragment(None); use std::collections::hash_map::DefaultHasher; use std::hash::Hash; use std::hash::Hasher; let mut s = DefaultHasher::new(); gcs_url.as_str().hash(&mut s); let out = s.finish(); tracing::debug!("pb_dir_name {:?}, hash {:?}", gcs_url, out); format!("{}", out) } /// Download and expand data. /// /// For a directory, all files in the directory are downloaded. /// For a .tgz file, the file is downloaded and expanded. async fn fetch_by_format<F, I>( format: &str, uri: &url::Url, local_dir: &Path, auth_flow: &AuthFlowChoice, progress: &F, ui: &I, ) -> Result<()> where F: Fn(DirectoryProgress<'_>, FileProgress<'_>) -> ProgressResult, I: structured_ui::Interface + Sync, { tracing::debug!("fetch_by_format"); match format { "files" | "tgz" => fetch_bundle_uri(uri, &local_dir, auth_flow, progress, ui).await, _ => // The schema currently defines only "files" or "tgz" (see RFC-100). // This error could be a typo in the product bundle or a new image // format has been added and this code needs an update. { bail!( "Unexpected image format ({:?}) in product bundle. \ Supported formats are \"files\" and \"tgz\". \ Please report as a bug.", format, ) } } } /// Download data from any of the supported schemes listed in RFC-100, Product /// Bundle, "bundle_uri". /// /// Currently: "pattern": "^(?:http|https|gs|file):\/\/" pub(crate) async fn fetch_bundle_uri<F, I>( product_url: &url::Url, local_dir: &Path, auth_flow: &AuthFlowChoice, progress: &F, ui: &I, ) -> Result<()> where F: Fn(DirectoryProgress<'_>, FileProgress<'_>) -> ProgressResult, I: structured_ui::Interface + Sync, { tracing::debug!("fetch_bundle_uri"); if product_url.scheme() == GS_SCHEME { fetch_from_gcs(product_url.as_str(), local_dir, auth_flow, progress, ui) .await .context("Downloading from GCS.")?; } else if product_url.scheme() == "http" || product_url.scheme() == "https" { fetch_from_web(product_url, local_dir, progress, ui) .await .context("fetching from http(s)")?; } else if let Some(_) = &path_from_file_url(product_url) { // Since the file is already local, no fetch is necessary. tracing::debug!("Found local file path {:?}", product_url); } else { bail!("Unexpected URI scheme in ({:?})", product_url); } Ok(()) } async fn fetch_from_web<F, I>( product_uri: &url::Url, local_dir: &Path, progress: &F, _ui: &I, ) -> Result<()> where F: Fn(DirectoryProgress<'_>, FileProgress<'_>) -> ProgressResult, I: structured_ui::Interface + Sync, { tracing::debug!("fetch_from_web"); let name = if let Some((_, name)) = product_uri.path().rsplit_once('/') { name } else { unimplemented!() }; if name.is_empty() { unimplemented!("downloading a directory from a web server is not implemented"); } let res = fuchsia_hyper::new_client() .get(hyper::Uri::from_maybe_shared(product_uri.to_string())?) .await .with_context(|| format!("Requesting {}", product_uri))?; match res.status() { StatusCode::OK => {} StatusCode::NOT_FOUND => { bail!("{} not found", product_uri); } status => { bail!("Unexpected HTTP status downloading {}: {}", product_uri, status); } } let mut at: u64 = 0; let length = if res.headers().contains_key(CONTENT_LENGTH) { res.headers() .get(CONTENT_LENGTH) .context("getting content length")? .to_str()? .parse::<u64>() .context("parsing content length")? 
} else { 0 }; std::fs::create_dir_all(local_dir) .with_context(|| format!("Creating {}", local_dir.display()))?; let path = local_dir.join(name); let mut file = File::create(&path).await.with_context(|| format!("Creating {}", path.display()))?; let mut stream = res.into_body(); let mut of = length; // Throttle the progress UI updates to avoid burning CPU on changes // the user will have trouble seeing anyway. Without throttling, // around 20% of the execution time can be spent updating the // progress UI. The throttle makes the overhead negligible. let mut throttle = Throttle::from_duration(std::time::Duration::from_millis(500)); let url = product_uri.to_string(); while let Some(chunk) = stream.try_next().await.with_context(|| format!("Downloading {}", product_uri))? { file.write_all(&chunk).await.with_context(|| format!("Writing {}", path.display()))?; at += chunk.len() as u64; if at > of { of = at; } if throttle.is_ready() { match progress( DirectoryProgress { name: &url, at: 0, of: 1, units: "files" }, FileProgress { name: &url, at, of, units: "bytes" }, ) .context("rendering progress")? { ProgressResponse::Cancel => break, _ => (), } } } file.close().await.with_context(|| format!("Closing {}", path.display()))?; Ok(()) } /// If internal_url is a file scheme, join `product_url` and `internal_url`. /// Otherwise, return `internal_url`. pub(crate) fn make_remote_url(product_url: &url::Url, internal_url: &str) -> Result<url::Url> { let result = if let Some(remainder) = internal_url.strip_prefix("file:/") { // Note: The product_url must either be a path to the product_bundle.json file or to the // parent directory (with a trailing slash). product_url.join(remainder)? } else { url::Url::parse(&internal_url).with_context(|| format!("parsing url {:?}", internal_url))? 
}; tracing::debug!( "make_remote_url product_url {:?}, internal_url {:?}, result {:?}", product_url, internal_url, result ); Ok(result) } #[cfg(test)] mod tests { use super::*; #[fuchsia_async::run_singlethreaded(test)] async fn test_path_from_file_url() { let input = url::Url::parse("fake://foo#bar").expect("url"); let output = path_from_file_url(&input); assert!(output.is_none()); let input = url::Url::parse("file:///../../foo#bar").expect("url"); let output = path_from_file_url(&input); assert_eq!(output, Some(Path::new("/foo").to_path_buf())); let input = url::Url::parse("file://foo#bar").expect("url"); let output = path_from_file_url(&input); assert!(output.is_none()); let input = url::Url::parse("file:///foo#bar").expect("url"); let output = path_from_file_url(&input); assert_eq!(output, Some(Path::new("/foo").to_path_buf())); let temp_dir = tempfile::TempDir::new().expect("temp dir"); let base_url = url::Url::from_directory_path(temp_dir.path().join("a/b/c/d")).expect("url"); let input = url::Url::options().base_url(Some(&base_url)).parse("../../foo#bar").expect("url"); let output = path_from_file_url(&input); assert_eq!(output, Some(temp_dir.path().join("a/b/foo").to_path_buf())); } #[fuchsia_async::run_singlethreaded(test)] async fn test_url_sans_fragment() { let input = url::Url::parse("fake://foo#bar").expect("url"); let output = url_sans_fragment(&input).expect("sans fragment"); assert_eq!(output, url::Url::parse("fake://foo").expect("check url")); let input = url::Url::parse("fake://foo").expect("url"); let output = url_sans_fragment(&input).expect("sans fragment"); assert_eq!(output, url::Url::parse("fake://foo").expect("check url")); } // Disabling this test until a test config can be modified without altering // the local user's config. #[ignore] #[fuchsia_async::run_singlethreaded(test)] async fn
test_local_path_helper
identifier_name
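
The row above both documents and tests `url_sans_fragment` ("file://foo#bar" -> "file://foo"). The behaviour is easy to reproduce with just the `url` crate (assumed as a dependency); the cases below mirror the test in the snippet.

```rust
// Reproduction of the url_sans_fragment behaviour documented above.
fn url_sans_fragment(product_url: &url::Url) -> url::Url {
    let mut out = product_url.to_owned();
    out.set_fragment(None);
    out
}

fn main() {
    // Same cases as the test in the row above.
    let input = url::Url::parse("fake://foo#bar").expect("url");
    assert_eq!(url_sans_fragment(&input), url::Url::parse("fake://foo").expect("url"));

    // A URL without a fragment is returned unchanged.
    let plain = url::Url::parse("fake://foo").expect("url");
    assert_eq!(url_sans_fragment(&plain), plain);
}
```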
lib.rs
will be substituted with `{ RAW TEXT }`. Braces in `RAW TEXT` will be ignored by the transpiler. //! This is especially useful when surrounding blocks of CSS. extern crate proc_macro; use proc_macro2::TokenStream; use quote::quote; use syn; use syn::parse::Parse; use syn::parse_macro_input; use std::borrow::Cow; use std::str::FromStr; /// Generates functions for rendering a template. /// Should be applied to a `struct` declaration. /// /// ## Meta Items /// `#[template]`'s first meta item must be a string literal /// representing the path to the template file. /// /// Subsequent attributes must be in `key=value` format. Currently, /// the following keys are supported: /// /// | Key | Possible Values | Default Value | Description | /// |---|---|---|---| /// | `escape` | `"txt", "html"` | `"txt"` | What escaping mode to use. If `"html"` is selected, `<`, `>`, and `&` will be changed to '&lt;', '&gt;', and '&amp;' respectively. If `"txt"` is selected, no escaping is performed. | /// /// ## Generated Methods /// /// `#[template]` will generate two associated methods with the following signatures: /// /// ```no_run /// # use std::io; /// pub fn render(&self, writer: &mut impl io::Write) -> io::Result<()>; /// pub fn render_string(&self) -> io::Result<String>; /// ``` #[proc_macro_attribute] pub fn template(attr: proc_macro::TokenStream, item: proc_macro::TokenStream) -> proc_macro::TokenStream { let item = parse_macro_input!(item as syn::ItemStruct); let attr = parse_macro_input!(attr as syn::AttributeArgs); let path = match &attr[0] { syn::NestedMeta::Lit(lit) => { match lit { syn::Lit::Str(s) => { s.value() }, _ => panic!("#[template]: expected string literal for path") } }, _ => { panic!("#[template]: expected string literal for path") } }; let mut escape = "txt".to_string(); for attr in &attr[1..] { match attr { syn::NestedMeta::Meta(syn::Meta::NameValue(val)) => { let ident = val.path.get_ident().expect("#[template]: expected name = value; name must be identifier, not path"); match ident.to_string().as_str() { "escape" => { let type_ = match &val.lit { syn::Lit::Str(s) => s.value(), _ => panic!("#[template]: attribute 'escape' must have string value") }; escape = type_; }, _ => panic!("#[template]: unknown attribute key '{}'", ident) } }, _ => panic!("#[template]: expected name = value") } } let escape_func = match escape.as_str() { "txt" => |s: syn::Expr| -> TokenStream { (quote! { #s }) }, "html" => |s: syn::Expr| -> TokenStream { let q = quote! { ::std::string::ToString::to_string(&(#s)).replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;") }; q }, _ => panic!("#[template]: unknown escape type: {}", escape) }; eprintln!("{:?}", std::env::current_dir().unwrap()); let template = std::fs::read_to_string(path).unwrap(); let parts = PartIterator(TokenIterator::new(&template)); let mut vecs: Vec<Vec<TokenStream>> = vec![vec![]]; for part in parts { match part { Part::Text(t) => { let last = vecs.len()-1; vecs[last].push(quote! { write!(writer, "{}", #t)?; }.into()); }, Part::Expr(expr, raw) => { let last = vecs.len()-1; if raw { vecs[last].push(quote! { write!(writer, "{}", #expr)?; }.into()); } else { let tokens = escape_func(expr); vecs[last].push(quote! 
{ write!(writer, "{}", #tokens)?; }.into()); } }, Part::Stmt(stmt) => { let last = vecs.len()-1; vecs[last].push(stmt.into_iter() .chain( std::array::IntoIter::new([ proc_macro2::Punct::new(';', proc_macro2::Spacing::Alone).into() ]) ) .collect::<TokenStream>()); }, Part::GroupStart(tokens) => { let last = vecs.len()-1; vecs[last].push(tokens); vecs.push(vec![]); }, Part::GroupEnd => { let vec = vecs.pop().expect("unmatched {:end} node"); let last = vecs.len()-1; vecs[last].push( <std::array::IntoIter<proc_macro2::TokenTree, 1>>::new([ proc_macro2::Group::new(proc_macro2::Delimiter::Brace, vec.into_iter().collect()).into() ]) .collect() ); }, Part::GroupStartEnd(tokens) => { let vec = vecs.pop().expect("unmatched {:end} node"); let last = vecs.len()-1; vecs[last].push( <std::array::IntoIter<proc_macro2::TokenTree, 1>>::new([ proc_macro2::Group::new(proc_macro2::Delimiter::Brace, vec.into_iter().collect()).into() ]) .collect() ); vecs[last].push(tokens); vecs.push(vec![]); } } } let code = vecs.into_iter().next().expect("unmatched {:end} node").into_iter().collect::<TokenStream>(); let item_ = item.clone(); let name = item.ident; let (impl_gen, type_gen, where_clause) = item.generics.split_for_impl(); let q = quote! { #item_ impl #impl_gen #name #type_gen #where_clause { pub fn render(&self, writer: &mut impl ::std::io::Write) -> ::std::io::Result<()> { #code Ok(()) } pub fn render_string(&self) -> ::std::io::Result<String> { let mut buf: Vec<u8> = Vec::new(); self.render(&mut buf)?; Ok(String::from_utf8_lossy(&buf).into_owned()) } } }; q.into() } #[derive(Clone)] enum Part { Text(String), Expr(syn::Expr, bool), Stmt(TokenStream), GroupStart(TokenStream), GroupStartEnd(TokenStream), GroupEnd, } struct PartIterator<'i>(pub TokenIterator<'i>); impl<'i> Iterator for PartIterator<'i> { type Item = Part; fn next(&mut self) -> Option<Part> { let tok = self.0.next()?; Some(match tok { Token::Text(t) => Part::Text(t.into_owned()), Token::Expr(t, raw) => { let expr = syn::parse_str(t).unwrap(); Part::Expr(expr, raw) }, Token::Stmt(t) => { let tokens = TokenStream::from_str(t).unwrap(); match tokens.clone().into_iter().next() { Some(proc_macro2::TokenTree::Ident(ident)) => { match ident.to_string().as_str() { "for" | "if" | "match" | "while" | "loop" => { Part::GroupStart(tokens) }, "else" => { Part::GroupStartEnd(tokens) } "case" => { Part::GroupStart( tokens.into_iter().skip(1) .chain( std::array::IntoIter::new([ proc_macro2::Punct::new('=', proc_macro2::Spacing::Joint).into(), proc_macro2::Punct::new('>', proc_macro2::Spacing::Alone).into(), ]) ) .collect()) }, "block" => { Part::GroupStart(TokenStream::new()) }, "end" => { Part::GroupEnd }, "include" => { let tokens = tokens.into_iter().skip(1).collect::<TokenStream>(); Part::Stmt(quote! { (#tokens).render(writer)? }) }, _ => Part::Stmt(tokens) } }, _ => { Part::Stmt(tokens) } } } }) } } #[derive(Clone, Debug)] enum Token<'i> { Text(Cow<'i, str>), Expr(&'i str, bool), Stmt(&'i str), } struct TokenIterator<'i> { src: &'i str, chars: std::iter::Peekable<std::str::CharIndices<'i>>, } impl<'i> TokenIterator<'i> { pub fn new(src: &'i str) -> Self
{ Self { src, chars: src.char_indices().peekable(), } }
identifier_body
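
The `escape = "html"` mode documented above rewrites `&`, `<`, and `>` into their HTML entities, and the generated code performs the `&` replacement first so the entities it introduces are not escaped again. A standalone sketch of that replacement order:

```rust
// Same replacement order as the generated code above: '&' first, so the
// "&lt;" and "&gt;" introduced later are not double-escaped.
fn escape_html(s: &str) -> String {
    s.replace('&', "&amp;").replace('<', "&lt;").replace('>', "&gt;")
}

fn main() {
    assert_eq!(escape_html("a < b && c > d"), "a &lt; b &amp;&amp; c &gt; d");
    // With the default escape = "txt", the input would be written unchanged.
}
```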
lib.rs
//! This is especially useful when surrounding blocks of CSS. extern crate proc_macro; use proc_macro2::TokenStream; use quote::quote; use syn; use syn::parse::Parse; use syn::parse_macro_input; use std::borrow::Cow; use std::str::FromStr; /// Generates functions for rendering a template. /// Should be applied to a `struct` declaration. /// /// ## Meta Items /// `#[template]`'s first meta item must be a string literal /// representing the path to the template file. /// /// Subsequent attributes must be in `key=value` format. Currently, /// the following keys are supported: /// /// | Key | Possible Values | Default Value | Description | /// |---|---|---|---| /// | `escape` | `"txt", "html"` | `"txt"` | What escaping mode to use. If `"html"` is selected, `<`, `>`, and `&` will be changed to '&lt;', '&gt;', and '&amp;' respectively. If `"txt"` is selected, no escaping is performed. | /// /// ## Generated Methods /// /// `#[template]` will generate two associated methods with the following signatures: /// /// ```no_run /// # use std::io; /// pub fn render(&self, writer: &mut impl io::Write) -> io::Result<()>; /// pub fn render_string(&self) -> io::Result<String>; /// ``` #[proc_macro_attribute] pub fn template(attr: proc_macro::TokenStream, item: proc_macro::TokenStream) -> proc_macro::TokenStream { let item = parse_macro_input!(item as syn::ItemStruct); let attr = parse_macro_input!(attr as syn::AttributeArgs); let path = match &attr[0] { syn::NestedMeta::Lit(lit) => { match lit { syn::Lit::Str(s) => { s.value() }, _ => panic!("#[template]: expected string literal for path") } }, _ => { panic!("#[template]: expected string literal for path") } }; let mut escape = "txt".to_string(); for attr in &attr[1..] { match attr { syn::NestedMeta::Meta(syn::Meta::NameValue(val)) => { let ident = val.path.get_ident().expect("#[template]: expected name = value; name must be identifier, not path"); match ident.to_string().as_str() { "escape" => { let type_ = match &val.lit { syn::Lit::Str(s) => s.value(), _ => panic!("#[template]: attribute 'escape' must have string value") }; escape = type_; }, _ => panic!("#[template]: unknown attribute key '{}'", ident) } }, _ => panic!("#[template]: expected name = value") } } let escape_func = match escape.as_str() { "txt" => |s: syn::Expr| -> TokenStream { (quote! { #s }) }, "html" => |s: syn::Expr| -> TokenStream { let q = quote! { ::std::string::ToString::to_string(&(#s)).replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;") }; q }, _ => panic!("#[template]: unknown escape type: {}", escape) }; eprintln!("{:?}", std::env::current_dir().unwrap()); let template = std::fs::read_to_string(path).unwrap(); let parts = PartIterator(TokenIterator::new(&template)); let mut vecs: Vec<Vec<TokenStream>> = vec![vec![]]; for part in parts { match part { Part::Text(t) => { let last = vecs.len()-1; vecs[last].push(quote! { write!(writer, "{}", #t)?; }.into()); }, Part::Expr(expr, raw) => { let last = vecs.len()-1; if raw { vecs[last].push(quote! { write!(writer, "{}", #expr)?; }.into()); } else { let tokens = escape_func(expr); vecs[last].push(quote! 
{ write!(writer, "{}", #tokens)?; }.into()); } }, Part::Stmt(stmt) => { let last = vecs.len()-1; vecs[last].push(stmt.into_iter() .chain( std::array::IntoIter::new([ proc_macro2::Punct::new(';', proc_macro2::Spacing::Alone).into() ]) ) .collect::<TokenStream>()); }, Part::GroupStart(tokens) => { let last = vecs.len()-1; vecs[last].push(tokens); vecs.push(vec![]); }, Part::GroupEnd => { let vec = vecs.pop().expect("unmatched {:end} node"); let last = vecs.len()-1; vecs[last].push( <std::array::IntoIter<proc_macro2::TokenTree, 1>>::new([ proc_macro2::Group::new(proc_macro2::Delimiter::Brace, vec.into_iter().collect()).into() ]) .collect() ); }, Part::GroupStartEnd(tokens) => { let vec = vecs.pop().expect("unmatched {:end} node"); let last = vecs.len()-1; vecs[last].push( <std::array::IntoIter<proc_macro2::TokenTree, 1>>::new([ proc_macro2::Group::new(proc_macro2::Delimiter::Brace, vec.into_iter().collect()).into() ]) .collect() ); vecs[last].push(tokens); vecs.push(vec![]); } } } let code = vecs.into_iter().next().expect("unmatched {:end} node").into_iter().collect::<TokenStream>(); let item_ = item.clone(); let name = item.ident; let (impl_gen, type_gen, where_clause) = item.generics.split_for_impl(); let q = quote! { #item_ impl #impl_gen #name #type_gen #where_clause { pub fn render(&self, writer: &mut impl ::std::io::Write) -> ::std::io::Result<()> { #code Ok(()) } pub fn render_string(&self) -> ::std::io::Result<String> { let mut buf: Vec<u8> = Vec::new(); self.render(&mut buf)?; Ok(String::from_utf8_lossy(&buf).into_owned()) } } }; q.into() } #[derive(Clone)] enum Part { Text(String), Expr(syn::Expr, bool), Stmt(TokenStream), GroupStart(TokenStream), GroupStartEnd(TokenStream), GroupEnd, } struct PartIterator<'i>(pub TokenIterator<'i>); impl<'i> Iterator for PartIterator<'i> { type Item = Part; fn next(&mut self) -> Option<Part> { let tok = self.0.next()?; Some(match tok { Token::Text(t) => Part::Text(t.into_owned()), Token::Expr(t, raw) => { let expr = syn::parse_str(t).unwrap(); Part::Expr(expr, raw) }, Token::Stmt(t) => { let tokens = TokenStream::from_str(t).unwrap(); match tokens.clone().into_iter().next() { Some(proc_macro2::TokenTree::Ident(ident)) => { match ident.to_string().as_str() { "for" | "if" | "match" | "while" | "loop" => { Part::GroupStart(tokens) }, "else" => { Part::GroupStartEnd(tokens) } "case" => { Part::GroupStart( tokens.into_iter().skip(1) .chain( std::array::IntoIter::new([ proc_macro2::Punct::new('=', proc_macro2::Spacing::Joint).into(), proc_macro2::Punct::new('>', proc_macro2::Spacing::Alone).into(), ]) ) .collect()) }, "block" => { Part::GroupStart(TokenStream::new()) }, "end" => { Part::GroupEnd }, "include" => { let tokens = tokens.into_iter().skip(1).collect::<TokenStream>(); Part::Stmt(quote! { (#tokens).render(writer)? }) }, _ => Part::Stmt(tokens) } }, _ => { Part::Stmt(tokens) } } } }) } } #[derive(Clone, Debug)] enum Token<'i> { Text(Cow<'i, str>), Expr(&'i str, bool), Stmt(&'i str), } struct TokenIterator<'i> { src: &'i str, chars: std::iter::Peekable<std::str::CharIndices<'i>>, } impl<'i> TokenIterator<'i> { pub fn new(src: &'i str) -> Self { Self { src, chars: src.char_indices().peekable(), } } } impl<'i> Iterator for TokenIterator<'i> { type Item = Token<'i>; fn
next
identifier_name
lib.rs
placed inside of an `if` block. //! - `{:end}` ends an `if` block. //! //! #### `match` //! - `{:match EXPR}` begins an `match` block. //! - `{:case PATTERN}` begins a `case` block and must be placed inside of a `match` block. //! `PATTERN` can be any pattern that is accepted by rust inside of a `match` statement. //! - `{:end}` ends a `match` block or a `case` block. //! //! #### `loop`, `while`, `for` //! - `{:loop}`, `{:while EXPR}`, `{:for PATTERN in ITER}` function just like their Rust counterparts //! and begin their corresponding blocks //! - `{:end}` ends a loop block. //! //! #### `block` //! You can use `{:block}` / `{:end}` to create a separate scope for variables, in case you don't want //! symbols leaking into the surrounding scope. //! //! #### `include` //! Use `{:include EXPR}`, where `EXPR` is a litem template, to include one template inside of another. //! //! ### Raw Text //! `{# RAW TEXT }` will be substituted with `{ RAW TEXT }`. Braces in `RAW TEXT` will be ignored by the transpiler. //! This is especially useful when surrounding blocks of CSS. extern crate proc_macro; use proc_macro2::TokenStream; use quote::quote; use syn; use syn::parse::Parse; use syn::parse_macro_input; use std::borrow::Cow; use std::str::FromStr; /// Generates functions for rendering a template. /// Should be applied to a `struct` declaration. /// /// ## Meta Items /// `#[template]`'s first meta item must be a string literal /// representing the path to the template file. /// /// Subsequent attributes must be in `key=value` format. Currently, /// the following keys are supported: /// /// | Key | Possible Values | Default Value | Description | /// |---|---|---|---| /// | `escape` | `"txt", "html"` | `"txt"` | What escaping mode to use. If `"html"` is selected, `<`, `>`, and `&` will be changed to '&lt;', '&gt;', and '&amp;' respectively. If `"txt"` is selected, no escaping is performed. | /// /// ## Generated Methods /// /// `#[template]` will generate two associated methods with the following signatures: /// /// ```no_run /// # use std::io; /// pub fn render(&self, writer: &mut impl io::Write) -> io::Result<()>; /// pub fn render_string(&self) -> io::Result<String>; /// ``` #[proc_macro_attribute] pub fn template(attr: proc_macro::TokenStream, item: proc_macro::TokenStream) -> proc_macro::TokenStream { let item = parse_macro_input!(item as syn::ItemStruct); let attr = parse_macro_input!(attr as syn::AttributeArgs); let path = match &attr[0] { syn::NestedMeta::Lit(lit) => { match lit { syn::Lit::Str(s) => { s.value() }, _ => panic!("#[template]: expected string literal for path") } }, _ => { panic!("#[template]: expected string literal for path") } }; let mut escape = "txt".to_string(); for attr in &attr[1..] { match attr { syn::NestedMeta::Meta(syn::Meta::NameValue(val)) =>
, _ => panic!("#[template]: expected name = value") } } let escape_func = match escape.as_str() { "txt" => |s: syn::Expr| -> TokenStream { (quote! { #s }) }, "html" => |s: syn::Expr| -> TokenStream { let q = quote! { ::std::string::ToString::to_string(&(#s)).replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;") }; q }, _ => panic!("#[template]: unknown escape type: {}", escape) }; eprintln!("{:?}", std::env::current_dir().unwrap()); let template = std::fs::read_to_string(path).unwrap(); let parts = PartIterator(TokenIterator::new(&template)); let mut vecs: Vec<Vec<TokenStream>> = vec![vec![]]; for part in parts { match part { Part::Text(t) => { let last = vecs.len()-1; vecs[last].push(quote! { write!(writer, "{}", #t)?; }.into()); }, Part::Expr(expr, raw) => { let last = vecs.len()-1; if raw { vecs[last].push(quote! { write!(writer, "{}", #expr)?; }.into()); } else { let tokens = escape_func(expr); vecs[last].push(quote! { write!(writer, "{}", #tokens)?; }.into()); } }, Part::Stmt(stmt) => { let last = vecs.len()-1; vecs[last].push(stmt.into_iter() .chain( std::array::IntoIter::new([ proc_macro2::Punct::new(';', proc_macro2::Spacing::Alone).into() ]) ) .collect::<TokenStream>()); }, Part::GroupStart(tokens) => { let last = vecs.len()-1; vecs[last].push(tokens); vecs.push(vec![]); }, Part::GroupEnd => { let vec = vecs.pop().expect("unmatched {:end} node"); let last = vecs.len()-1; vecs[last].push( <std::array::IntoIter<proc_macro2::TokenTree, 1>>::new([ proc_macro2::Group::new(proc_macro2::Delimiter::Brace, vec.into_iter().collect()).into() ]) .collect() ); }, Part::GroupStartEnd(tokens) => { let vec = vecs.pop().expect("unmatched {:end} node"); let last = vecs.len()-1; vecs[last].push( <std::array::IntoIter<proc_macro2::TokenTree, 1>>::new([ proc_macro2::Group::new(proc_macro2::Delimiter::Brace, vec.into_iter().collect()).into() ]) .collect() ); vecs[last].push(tokens); vecs.push(vec![]); } } } let code = vecs.into_iter().next().expect("unmatched {:end} node").into_iter().collect::<TokenStream>(); let item_ = item.clone(); let name = item.ident; let (impl_gen, type_gen, where_clause) = item.generics.split_for_impl(); let q = quote! { #item_ impl #impl_gen #name #type_gen #where_clause { pub fn render(&self, writer: &mut impl ::std::io::Write) -> ::std::io::Result<()> { #code Ok(()) } pub fn render_string(&self) -> ::std::io::Result<String> { let mut buf: Vec<u8> = Vec::new(); self.render(&mut buf)?; Ok(String::from_utf8_lossy(&buf).into_owned()) } } }; q.into() } #[derive(Clone)] enum Part { Text(String), Expr(syn::Expr, bool), Stmt(TokenStream), GroupStart(TokenStream), GroupStartEnd(TokenStream), GroupEnd, } struct PartIterator<'i>(pub TokenIterator<'i>); impl<'i> Iterator for PartIterator<'i> { type Item = Part; fn next(&mut self) -> Option<Part> { let tok = self.0.next()?; Some(match tok { Token::Text(t) => Part::Text(t.into_owned()), Token::Expr(t, raw) => { let expr = syn::parse_str(t).unwrap(); Part::Expr(expr, raw) }, Token::Stmt(t) => { let tokens = TokenStream::from_str(t).unwrap(); match tokens.clone().into_iter().next() { Some(proc_macro2::TokenTree::Ident(ident)) => { match ident.to_string().as_str() { "for" | "if" | "match" | "while" | "loop" => { Part::GroupStart(tokens) }, "else" => { Part::GroupStartEnd(tokens) } "case" => { Part::GroupStart( tokens.into_iter().skip(1) .chain( std::array::IntoIter::new([ proc_macro2::Punct::new('=', proc_macro2::Spacing::
{ let ident = val.path.get_ident().expect("#[template]: expected name = value; name must be identifier, not path"); match ident.to_string().as_str() { "escape" => { let type_ = match &val.lit { syn::Lit::Str(s) => s.value(), _ => panic!("#[template]: attribute 'escape' must have string value") }; escape = type_; }, _ => panic!("#[template]: unknown attribute key '{}'", ident) } }
conditional_block
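
The row above documents the template syntax (`{:if}`, `{:match}`, `{:for}`, `{:include}`, `{:end}`, and `{# raw text}`). A hypothetical end-to-end usage sketch follows; the crate path `litem`, the template file name, and the bare `{item}` expression syntax are assumptions, while the attribute arguments and the generated `render_string` method match the documentation above.

```rust
// templates/list.html (illustrative content, not from the source):
//
//   <ul>
//   {:for item in &self.items}
//     {:if !item.is_empty()}<li>{item}</li>{:end}
//   {:end}
//   </ul>

// Crate path `litem` and `{item}` expression syntax are assumptions;
// the `path, key = value` attribute form matches the docs above.
#[litem::template("templates/list.html", escape = "html")]
struct ListPage {
    items: Vec<String>,
}

fn main() -> std::io::Result<()> {
    let page = ListPage { items: vec!["plain".into(), "<b>bold</b>".into()] };
    // render_string() is one of the two methods the attribute generates.
    println!("{}", page.render_string()?);
    Ok(())
}
```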
lib.rs
/// Subsequent attributes must be in `key=value` format. Currently, /// the following keys are supported: /// /// | Key | Possible Values | Default Value | Description | /// |---|---|---|---| /// | `escape` | `"txt", "html"` | `"txt"` | What escaping mode to use. If `"html"` is selected, `<`, `>`, and `&` will be changed to '&lt;', '&gt;', and '&amp;' respectively. If `"txt"` is selected, no escaping is performed. | /// /// ## Generated Methods /// /// `#[template]` will generate two associated methods with the following signatures: /// /// ```no_run /// # use std::io; /// pub fn render(&self, writer: &mut impl io::Write) -> io::Result<()>; /// pub fn render_string(&self) -> io::Result<String>; /// ``` #[proc_macro_attribute] pub fn template(attr: proc_macro::TokenStream, item: proc_macro::TokenStream) -> proc_macro::TokenStream { let item = parse_macro_input!(item as syn::ItemStruct); let attr = parse_macro_input!(attr as syn::AttributeArgs); let path = match &attr[0] { syn::NestedMeta::Lit(lit) => { match lit { syn::Lit::Str(s) => { s.value() }, _ => panic!("#[template]: expected string literal for path") } }, _ => { panic!("#[template]: expected string literal for path") } }; let mut escape = "txt".to_string(); for attr in &attr[1..] { match attr { syn::NestedMeta::Meta(syn::Meta::NameValue(val)) => { let ident = val.path.get_ident().expect("#[template]: expected name = value; name must be identifier, not path"); match ident.to_string().as_str() { "escape" => { let type_ = match &val.lit { syn::Lit::Str(s) => s.value(), _ => panic!("#[template]: attribute 'escape' must have string value") }; escape = type_; }, _ => panic!("#[template]: unknown attribute key '{}'", ident) } }, _ => panic!("#[template]: expected name = value") } } let escape_func = match escape.as_str() { "txt" => |s: syn::Expr| -> TokenStream { (quote! { #s }) }, "html" => |s: syn::Expr| -> TokenStream { let q = quote! { ::std::string::ToString::to_string(&(#s)).replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;") }; q }, _ => panic!("#[template]: unknown escape type: {}", escape) }; eprintln!("{:?}", std::env::current_dir().unwrap()); let template = std::fs::read_to_string(path).unwrap(); let parts = PartIterator(TokenIterator::new(&template)); let mut vecs: Vec<Vec<TokenStream>> = vec![vec![]]; for part in parts { match part { Part::Text(t) => { let last = vecs.len()-1; vecs[last].push(quote! { write!(writer, "{}", #t)?; }.into()); }, Part::Expr(expr, raw) => { let last = vecs.len()-1; if raw { vecs[last].push(quote! { write!(writer, "{}", #expr)?; }.into()); } else { let tokens = escape_func(expr); vecs[last].push(quote! 
{ write!(writer, "{}", #tokens)?; }.into()); } }, Part::Stmt(stmt) => { let last = vecs.len()-1; vecs[last].push(stmt.into_iter() .chain( std::array::IntoIter::new([ proc_macro2::Punct::new(';', proc_macro2::Spacing::Alone).into() ]) ) .collect::<TokenStream>()); }, Part::GroupStart(tokens) => { let last = vecs.len()-1; vecs[last].push(tokens); vecs.push(vec![]); }, Part::GroupEnd => { let vec = vecs.pop().expect("unmatched {:end} node"); let last = vecs.len()-1; vecs[last].push( <std::array::IntoIter<proc_macro2::TokenTree, 1>>::new([ proc_macro2::Group::new(proc_macro2::Delimiter::Brace, vec.into_iter().collect()).into() ]) .collect() ); }, Part::GroupStartEnd(tokens) => { let vec = vecs.pop().expect("unmatched {:end} node"); let last = vecs.len()-1; vecs[last].push( <std::array::IntoIter<proc_macro2::TokenTree, 1>>::new([ proc_macro2::Group::new(proc_macro2::Delimiter::Brace, vec.into_iter().collect()).into() ]) .collect() ); vecs[last].push(tokens); vecs.push(vec![]); } } } let code = vecs.into_iter().next().expect("unmatched {:end} node").into_iter().collect::<TokenStream>(); let item_ = item.clone(); let name = item.ident; let (impl_gen, type_gen, where_clause) = item.generics.split_for_impl(); let q = quote! { #item_ impl #impl_gen #name #type_gen #where_clause { pub fn render(&self, writer: &mut impl ::std::io::Write) -> ::std::io::Result<()> { #code Ok(()) } pub fn render_string(&self) -> ::std::io::Result<String> { let mut buf: Vec<u8> = Vec::new(); self.render(&mut buf)?; Ok(String::from_utf8_lossy(&buf).into_owned()) } } }; q.into() } #[derive(Clone)] enum Part { Text(String), Expr(syn::Expr, bool), Stmt(TokenStream), GroupStart(TokenStream), GroupStartEnd(TokenStream), GroupEnd, } struct PartIterator<'i>(pub TokenIterator<'i>); impl<'i> Iterator for PartIterator<'i> { type Item = Part; fn next(&mut self) -> Option<Part> { let tok = self.0.next()?; Some(match tok { Token::Text(t) => Part::Text(t.into_owned()), Token::Expr(t, raw) => { let expr = syn::parse_str(t).unwrap(); Part::Expr(expr, raw) }, Token::Stmt(t) => { let tokens = TokenStream::from_str(t).unwrap(); match tokens.clone().into_iter().next() { Some(proc_macro2::TokenTree::Ident(ident)) => { match ident.to_string().as_str() { "for" | "if" | "match" | "while" | "loop" => { Part::GroupStart(tokens) }, "else" => { Part::GroupStartEnd(tokens) } "case" => { Part::GroupStart( tokens.into_iter().skip(1) .chain( std::array::IntoIter::new([ proc_macro2::Punct::new('=', proc_macro2::Spacing::Joint).into(), proc_macro2::Punct::new('>', proc_macro2::Spacing::Alone).into(), ]) ) .collect()) }, "block" => { Part::GroupStart(TokenStream::new()) }, "end" => { Part::GroupEnd }, "include" => { let tokens = tokens.into_iter().skip(1).collect::<TokenStream>(); Part::Stmt(quote! { (#tokens).render(writer)? }) }, _ => Part::Stmt(tokens) } }, _ => { Part::Stmt(tokens) } } } }) } } #[derive(Clone, Debug)] enum Token<'i> { Text(Cow<'i, str>), Expr(&'i str, bool), Stmt(&'i str), } struct TokenIterator<'i> { src: &'i str, chars: std::iter::Peekable<std::str::CharIndices<'i>>, } impl<'i> TokenIterator<'i> { pub fn new(src: &'i str) -> Self { Self { src, chars: src.char_indices().peekable(), } } } impl<'i> Iterator for TokenIterator<'i> { type Item = Token<'i>; fn next(&mut self) -> Option<Token<'i>> { let (first_idx, first) = match self.chars.peek() { None => return None, Some(v) => *v, }; let mut n_braces = 0; let (final_idx, final_) = loop { let (idx, chr) = self.chars.next().unwrap(); let (next_idx, next_chr) = match self.chars.peek() {
None => { break (idx, chr); }, Some(x) => *x, };
random_line_split
driver.rs
2"))] fn run_jit(tcx: TyCtxt<'_>, log: &mut Option<File>) -> ! { use cranelift_simplejit::{SimpleJITBackend, SimpleJITBuilder}; let imported_symbols = load_imported_symbols_for_jit(tcx); let mut jit_builder = SimpleJITBuilder::with_isa( crate::build_isa(tcx.sess, false), cranelift_module::default_libcall_names(), ); jit_builder.symbols(imported_symbols); let mut jit_module: Module<SimpleJITBackend> = Module::new(jit_builder); assert_eq!(pointer_ty(tcx), jit_module.target_config().pointer_type()); let sig = Signature { params: vec![ AbiParam::new(jit_module.target_config().pointer_type()), AbiParam::new(jit_module.target_config().pointer_type()), ], returns: vec![AbiParam::new( jit_module.target_config().pointer_type(), /*isize*/ )], call_conv: CallConv::SystemV, }; let main_func_id = jit_module .declare_function("main", Linkage::Import, &sig) .unwrap(); codegen_cgus(tcx, &mut jit_module, &mut None, log); crate::allocator::codegen(tcx.sess, &mut jit_module); jit_module.finalize_definitions(); tcx.sess.abort_if_errors(); let finalized_main: *const u8 = jit_module.get_finalized_function(main_func_id); println!("Rustc codegen cranelift will JIT run the executable, because the SHOULD_RUN env var is set"); let f: extern "C" fn(c_int, *const *const c_char) -> c_int = unsafe { ::std::mem::transmute(finalized_main) }; let args = ::std::env::var("JIT_ARGS").unwrap_or_else(|_| String::new()); let args = args .split(" ") .chain(Some(&*tcx.crate_name(LOCAL_CRATE).as_str().to_string())) .map(|arg| CString::new(arg).unwrap()) .collect::<Vec<_>>(); let argv = args.iter().map(|arg| arg.as_ptr()).collect::<Vec<_>>(); // TODO: Rust doesn't care, but POSIX argv has a NULL sentinel at the end let ret = f(args.len() as c_int, argv.as_ptr()); jit_module.finish(); std::process::exit(ret); } fn load_imported_symbols_for_jit(tcx: TyCtxt<'_>) -> Vec<(String, *const u8)> { use rustc::middle::dependency_format::Linkage; let mut dylib_paths = Vec::new(); let crate_info = CrateInfo::new(tcx); let formats = tcx.sess.dependency_formats.borrow(); let data = formats.get(&CrateType::Executable).unwrap(); for &(cnum, _) in &crate_info.used_crates_dynamic { let src = &crate_info.used_crate_source[&cnum]; match data[cnum.as_usize() - 1] { Linkage::NotLinked | Linkage::IncludedFromDylib => {} Linkage::Static => { let name = tcx.crate_name(cnum); let mut err = tcx .sess .struct_err(&format!("Can't load static lib {}", name.as_str())); err.note("rustc_codegen_cranelift can only load dylibs in JIT mode."); err.emit(); } Linkage::Dynamic => { dylib_paths.push(src.dylib.as_ref().unwrap().0.clone()); } } } let mut imported_symbols = Vec::new(); for path in dylib_paths { use object::Object; let lib = libloading::Library::new(&path).unwrap(); let obj = std::fs::read(path).unwrap(); let obj = object::File::parse(&obj).unwrap(); imported_symbols.extend(obj.dynamic_symbols().filter_map(|(_idx, symbol)| { let name = symbol.name().unwrap().to_string(); if name.is_empty() || !symbol.is_global() || symbol.is_undefined() { return None; } let symbol: libloading::Symbol<*const u8> = unsafe { lib.get(name.as_bytes()) }.unwrap(); Some((name, *symbol)) })); std::mem::forget(lib) } tcx.sess.abort_if_errors(); imported_symbols } fn run_aot( tcx: TyCtxt<'_>, metadata: EncodedMetadata, need_metadata_module: bool, log: &mut Option<File>, ) -> Box<CodegenResults> { let new_module = |name: String| { let module: Module<FaerieBackend> = Module::new( FaerieBuilder::new( crate::build_isa(tcx.sess, true), name + ".o", FaerieTrapCollection::Disabled, 
cranelift_module::default_libcall_names(), ) .unwrap(), ); assert_eq!(pointer_ty(tcx), module.target_config().pointer_type()); module }; let emit_module = |kind: ModuleKind, mut module: Module<FaerieBackend>, debug: Option<DebugContext>| { module.finalize_definitions(); let mut artifact = module.finish().artifact; if let Some(mut debug) = debug { debug.emit(&mut artifact); } let tmp_file = tcx .output_filenames(LOCAL_CRATE) .temp_path(OutputType::Object, Some(&artifact.name)); let obj = artifact.emit().unwrap(); std::fs::write(&tmp_file, obj).unwrap(); CompiledModule { name: artifact.name, kind, object: Some(tmp_file), bytecode: None, bytecode_compressed: None, } }; let mut faerie_module = new_module("some_file".to_string()); let mut debug = if tcx.sess.opts.debuginfo != DebugInfo::None // macOS debuginfo doesn't work yet (see #303) && !tcx.sess.target.target.options.is_like_osx { let debug = DebugContext::new( tcx, faerie_module.target_config().pointer_type().bytes() as u8, ); Some(debug) } else { None }; codegen_cgus(tcx, &mut faerie_module, &mut debug, log); tcx.sess.abort_if_errors(); let mut allocator_module = new_module("allocator_shim".to_string()); let created_alloc_shim = crate::allocator::codegen(tcx.sess, &mut allocator_module); rustc_incremental::assert_dep_graph(tcx); rustc_incremental::save_dep_graph(tcx); rustc_incremental::finalize_session_directory(tcx.sess, tcx.crate_hash(LOCAL_CRATE)); let metadata_module = if need_metadata_module { use rustc::mir::mono::CodegenUnitNameBuilder; let cgu_name_builder = &mut CodegenUnitNameBuilder::new(tcx); let metadata_cgu_name = cgu_name_builder .build_cgu_name(LOCAL_CRATE, &["crate"], Some("metadata")) .as_str() .to_string(); let mut metadata_artifact = faerie::Artifact::new( crate::build_isa(tcx.sess, true).triple().clone(), metadata_cgu_name.clone(), ); crate::metadata::write_metadata(tcx, &mut metadata_artifact); let tmp_file = tcx .output_filenames(LOCAL_CRATE) .temp_path(OutputType::Metadata, Some(&metadata_cgu_name)); let obj = metadata_artifact.emit().unwrap(); std::fs::write(&tmp_file, obj).unwrap(); Some(CompiledModule { name: metadata_cgu_name, kind: ModuleKind::Metadata, object: Some(tmp_file), bytecode: None, bytecode_compressed: None, }) } else { None }; Box::new(CodegenResults { crate_name: tcx.crate_name(LOCAL_CRATE), modules: vec![emit_module( ModuleKind::Regular, faerie_module, debug, )], allocator_module: if created_alloc_shim { Some(emit_module( ModuleKind::Allocator, allocator_module, None, )) } else { None }, metadata_module, crate_hash: tcx.crate_hash(LOCAL_CRATE), metadata, windows_subsystem: None, // Windows is not yet supported linker_info: LinkerInfo::new(tcx), crate_info: CrateInfo::new(tcx), }) } fn codegen_cgus<'tcx>( tcx: TyCtxt<'tcx>, module: &mut Module<impl Backend + 'static>, debug: &mut Option<DebugContext<'tcx>>, log: &mut Option<File>, )
{ let (_, cgus) = tcx.collect_and_partition_mono_items(LOCAL_CRATE); let mono_items = cgus .iter() .map(|cgu| cgu.items_in_deterministic_order(tcx).into_iter()) .flatten() .collect::<FxHashMap<_, (_, _)>>(); codegen_mono_items(tcx, module, debug.as_mut(), log, mono_items); crate::main_shim::maybe_create_entry_wrapper(tcx, module); }
identifier_body
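
`run_jit` in the row above builds a C-style `argv` from `CString`s before calling the JIT-compiled `main`, and its TODO notes that POSIX expects a trailing NULL sentinel. A standalone sketch of that construction, with the sentinel added:

```rust
// argv construction in the style of run_jit, plus the NULL sentinel.
use std::ffi::CString;
use std::os::raw::c_char;

fn main() {
    let args = ["my_crate", "--flag", "value"];
    // CString adds the trailing NUL byte to each argument.
    let owned: Vec<CString> = args
        .iter()
        .map(|a| CString::new(*a).expect("argument with interior NUL byte"))
        .collect();
    // Collect raw pointers and terminate the array with a null pointer.
    let mut argv: Vec<*const c_char> = owned.iter().map(|c| c.as_ptr()).collect();
    argv.push(std::ptr::null());
    println!(
        "argc = {}, argv entries = {} (including the NULL sentinel)",
        owned.len(),
        argv.len()
    );
}
```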
driver.rs
it(tcx: TyCtxt<'_>) -> Vec<(String, *const u8)> { use rustc::middle::dependency_format::Linkage; let mut dylib_paths = Vec::new(); let crate_info = CrateInfo::new(tcx); let formats = tcx.sess.dependency_formats.borrow(); let data = formats.get(&CrateType::Executable).unwrap(); for &(cnum, _) in &crate_info.used_crates_dynamic { let src = &crate_info.used_crate_source[&cnum]; match data[cnum.as_usize() - 1] { Linkage::NotLinked | Linkage::IncludedFromDylib => {} Linkage::Static => { let name = tcx.crate_name(cnum); let mut err = tcx .sess .struct_err(&format!("Can't load static lib {}", name.as_str())); err.note("rustc_codegen_cranelift can only load dylibs in JIT mode."); err.emit(); } Linkage::Dynamic => { dylib_paths.push(src.dylib.as_ref().unwrap().0.clone()); } } } let mut imported_symbols = Vec::new(); for path in dylib_paths { use object::Object; let lib = libloading::Library::new(&path).unwrap(); let obj = std::fs::read(path).unwrap(); let obj = object::File::parse(&obj).unwrap(); imported_symbols.extend(obj.dynamic_symbols().filter_map(|(_idx, symbol)| { let name = symbol.name().unwrap().to_string(); if name.is_empty() || !symbol.is_global() || symbol.is_undefined() { return None; } let symbol: libloading::Symbol<*const u8> = unsafe { lib.get(name.as_bytes()) }.unwrap(); Some((name, *symbol)) })); std::mem::forget(lib) } tcx.sess.abort_if_errors(); imported_symbols } fn run_aot( tcx: TyCtxt<'_>, metadata: EncodedMetadata, need_metadata_module: bool, log: &mut Option<File>, ) -> Box<CodegenResults> { let new_module = |name: String| { let module: Module<FaerieBackend> = Module::new( FaerieBuilder::new( crate::build_isa(tcx.sess, true), name + ".o", FaerieTrapCollection::Disabled, cranelift_module::default_libcall_names(), ) .unwrap(), ); assert_eq!(pointer_ty(tcx), module.target_config().pointer_type()); module }; let emit_module = |kind: ModuleKind, mut module: Module<FaerieBackend>, debug: Option<DebugContext>| { module.finalize_definitions(); let mut artifact = module.finish().artifact; if let Some(mut debug) = debug { debug.emit(&mut artifact); } let tmp_file = tcx .output_filenames(LOCAL_CRATE) .temp_path(OutputType::Object, Some(&artifact.name)); let obj = artifact.emit().unwrap(); std::fs::write(&tmp_file, obj).unwrap(); CompiledModule { name: artifact.name, kind, object: Some(tmp_file), bytecode: None, bytecode_compressed: None, } }; let mut faerie_module = new_module("some_file".to_string()); let mut debug = if tcx.sess.opts.debuginfo != DebugInfo::None // macOS debuginfo doesn't work yet (see #303) && !tcx.sess.target.target.options.is_like_osx { let debug = DebugContext::new( tcx, faerie_module.target_config().pointer_type().bytes() as u8, ); Some(debug) } else { None }; codegen_cgus(tcx, &mut faerie_module, &mut debug, log); tcx.sess.abort_if_errors(); let mut allocator_module = new_module("allocator_shim".to_string()); let created_alloc_shim = crate::allocator::codegen(tcx.sess, &mut allocator_module); rustc_incremental::assert_dep_graph(tcx); rustc_incremental::save_dep_graph(tcx); rustc_incremental::finalize_session_directory(tcx.sess, tcx.crate_hash(LOCAL_CRATE)); let metadata_module = if need_metadata_module { use rustc::mir::mono::CodegenUnitNameBuilder; let cgu_name_builder = &mut CodegenUnitNameBuilder::new(tcx); let metadata_cgu_name = cgu_name_builder .build_cgu_name(LOCAL_CRATE, &["crate"], Some("metadata")) .as_str() .to_string(); let mut metadata_artifact = faerie::Artifact::new( crate::build_isa(tcx.sess, true).triple().clone(), metadata_cgu_name.clone(), ); 
crate::metadata::write_metadata(tcx, &mut metadata_artifact); let tmp_file = tcx .output_filenames(LOCAL_CRATE) .temp_path(OutputType::Metadata, Some(&metadata_cgu_name)); let obj = metadata_artifact.emit().unwrap(); std::fs::write(&tmp_file, obj).unwrap(); Some(CompiledModule { name: metadata_cgu_name, kind: ModuleKind::Metadata, object: Some(tmp_file), bytecode: None, bytecode_compressed: None, }) } else { None }; Box::new(CodegenResults { crate_name: tcx.crate_name(LOCAL_CRATE), modules: vec![emit_module( ModuleKind::Regular, faerie_module, debug, )], allocator_module: if created_alloc_shim { Some(emit_module( ModuleKind::Allocator, allocator_module, None, )) } else { None }, metadata_module, crate_hash: tcx.crate_hash(LOCAL_CRATE), metadata, windows_subsystem: None, // Windows is not yet supported linker_info: LinkerInfo::new(tcx), crate_info: CrateInfo::new(tcx), }) } fn codegen_cgus<'tcx>( tcx: TyCtxt<'tcx>, module: &mut Module<impl Backend + 'static>, debug: &mut Option<DebugContext<'tcx>>, log: &mut Option<File>, ) { let (_, cgus) = tcx.collect_and_partition_mono_items(LOCAL_CRATE); let mono_items = cgus .iter() .map(|cgu| cgu.items_in_deterministic_order(tcx).into_iter()) .flatten() .collect::<FxHashMap<_, (_, _)>>(); codegen_mono_items(tcx, module, debug.as_mut(), log, mono_items); crate::main_shim::maybe_create_entry_wrapper(tcx, module); } fn codegen_mono_items<'tcx>( tcx: TyCtxt<'tcx>, module: &mut Module<impl Backend + 'static>, debug_context: Option<&mut DebugContext<'tcx>>, log: &mut Option<File>, mono_items: FxHashMap<MonoItem<'tcx>, (RLinkage, Visibility)>, ) { let mut cx = CodegenCx::new(tcx, module, debug_context); time("codegen mono items", move || { for (mono_item, (linkage, visibility)) in mono_items { crate::unimpl::try_unimpl(tcx, log, || { let linkage = crate::linkage::get_clif_linkage(mono_item, linkage, visibility); trans_mono_item(&mut cx, mono_item, linkage); }); } cx.finalize(); }); } fn trans_mono_item<'clif, 'tcx, B: Backend + 'static>( cx: &mut crate::CodegenCx<'clif, 'tcx, B>, mono_item: MonoItem<'tcx>, linkage: Linkage, ) { let tcx = cx.tcx; match mono_item { MonoItem::Fn(inst) => { let _inst_guard = PrintOnPanic(|| format!("{:?} {}", inst, tcx.symbol_name(inst).name.as_str())); debug_assert!(!inst.substs.needs_infer()); let _mir_guard = PrintOnPanic(|| { match inst.def { InstanceDef::Item(_) | InstanceDef::DropGlue(_, _) | InstanceDef::Virtual(_, _) => { let mut mir = ::std::io::Cursor::new(Vec::new()); crate::rustc_mir::util::write_mir_pretty( tcx, Some(inst.def_id()), &mut mir, ) .unwrap(); String::from_utf8(mir.into_inner()).unwrap() } _ => { // FIXME fix write_mir_pretty for these instances format!("{:#?}", tcx.instance_mir(inst.def)) } } }); crate::base::trans_fn(cx, inst, linkage); } MonoItem::Static(def_id) => { crate::constant::codegen_static(&mut cx.constants_cx, def_id); } MonoItem::GlobalAsm(node_id) => tcx .sess .fatal(&format!("Unimplemented global asm mono item {:?}", node_id)), } } fn
time
identifier_name
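
The identifier completed in the row above is `time`, the small helper that wraps the "codegen mono items" work in a timer. Its signature is not shown in the snippet, so the following standalone sketch is an assumption based only on how it is called.

```rust
// Assumed shape of `time`, consistent with the call
// `time("codegen mono items", move || { ... })` above.
use std::time::Instant;

fn time<R>(label: &str, f: impl FnOnce() -> R) -> R {
    let start = Instant::now();
    let result = f();
    eprintln!("{} took {:.3}s", label, start.elapsed().as_secs_f64());
    result
}

fn main() {
    let sum: u64 = time("summing", || (0..1_000_000u64).sum());
    println!("sum = {}", sum);
}
```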
driver.rs
32")] panic!("jit not supported on wasm"); } run_aot(tcx, metadata, need_metadata_module, &mut log) } #[cfg(not(target_arch = "wasm32"))] fn run_jit(tcx: TyCtxt<'_>, log: &mut Option<File>) -> ! { use cranelift_simplejit::{SimpleJITBackend, SimpleJITBuilder}; let imported_symbols = load_imported_symbols_for_jit(tcx); let mut jit_builder = SimpleJITBuilder::with_isa( crate::build_isa(tcx.sess, false), cranelift_module::default_libcall_names(), ); jit_builder.symbols(imported_symbols); let mut jit_module: Module<SimpleJITBackend> = Module::new(jit_builder); assert_eq!(pointer_ty(tcx), jit_module.target_config().pointer_type()); let sig = Signature { params: vec![ AbiParam::new(jit_module.target_config().pointer_type()), AbiParam::new(jit_module.target_config().pointer_type()), ], returns: vec![AbiParam::new( jit_module.target_config().pointer_type(), /*isize*/ )], call_conv: CallConv::SystemV, }; let main_func_id = jit_module .declare_function("main", Linkage::Import, &sig)
jit_module.finalize_definitions(); tcx.sess.abort_if_errors(); let finalized_main: *const u8 = jit_module.get_finalized_function(main_func_id); println!("Rustc codegen cranelift will JIT run the executable, because the SHOULD_RUN env var is set"); let f: extern "C" fn(c_int, *const *const c_char) -> c_int = unsafe { ::std::mem::transmute(finalized_main) }; let args = ::std::env::var("JIT_ARGS").unwrap_or_else(|_| String::new()); let args = args .split(" ") .chain(Some(&*tcx.crate_name(LOCAL_CRATE).as_str().to_string())) .map(|arg| CString::new(arg).unwrap()) .collect::<Vec<_>>(); let argv = args.iter().map(|arg| arg.as_ptr()).collect::<Vec<_>>(); // TODO: Rust doesn't care, but POSIX argv has a NULL sentinel at the end let ret = f(args.len() as c_int, argv.as_ptr()); jit_module.finish(); std::process::exit(ret); } fn load_imported_symbols_for_jit(tcx: TyCtxt<'_>) -> Vec<(String, *const u8)> { use rustc::middle::dependency_format::Linkage; let mut dylib_paths = Vec::new(); let crate_info = CrateInfo::new(tcx); let formats = tcx.sess.dependency_formats.borrow(); let data = formats.get(&CrateType::Executable).unwrap(); for &(cnum, _) in &crate_info.used_crates_dynamic { let src = &crate_info.used_crate_source[&cnum]; match data[cnum.as_usize() - 1] { Linkage::NotLinked | Linkage::IncludedFromDylib => {} Linkage::Static => { let name = tcx.crate_name(cnum); let mut err = tcx .sess .struct_err(&format!("Can't load static lib {}", name.as_str())); err.note("rustc_codegen_cranelift can only load dylibs in JIT mode."); err.emit(); } Linkage::Dynamic => { dylib_paths.push(src.dylib.as_ref().unwrap().0.clone()); } } } let mut imported_symbols = Vec::new(); for path in dylib_paths { use object::Object; let lib = libloading::Library::new(&path).unwrap(); let obj = std::fs::read(path).unwrap(); let obj = object::File::parse(&obj).unwrap(); imported_symbols.extend(obj.dynamic_symbols().filter_map(|(_idx, symbol)| { let name = symbol.name().unwrap().to_string(); if name.is_empty() || !symbol.is_global() || symbol.is_undefined() { return None; } let symbol: libloading::Symbol<*const u8> = unsafe { lib.get(name.as_bytes()) }.unwrap(); Some((name, *symbol)) })); std::mem::forget(lib) } tcx.sess.abort_if_errors(); imported_symbols } fn run_aot( tcx: TyCtxt<'_>, metadata: EncodedMetadata, need_metadata_module: bool, log: &mut Option<File>, ) -> Box<CodegenResults> { let new_module = |name: String| { let module: Module<FaerieBackend> = Module::new( FaerieBuilder::new( crate::build_isa(tcx.sess, true), name + ".o", FaerieTrapCollection::Disabled, cranelift_module::default_libcall_names(), ) .unwrap(), ); assert_eq!(pointer_ty(tcx), module.target_config().pointer_type()); module }; let emit_module = |kind: ModuleKind, mut module: Module<FaerieBackend>, debug: Option<DebugContext>| { module.finalize_definitions(); let mut artifact = module.finish().artifact; if let Some(mut debug) = debug { debug.emit(&mut artifact); } let tmp_file = tcx .output_filenames(LOCAL_CRATE) .temp_path(OutputType::Object, Some(&artifact.name)); let obj = artifact.emit().unwrap(); std::fs::write(&tmp_file, obj).unwrap(); CompiledModule { name: artifact.name, kind, object: Some(tmp_file), bytecode: None, bytecode_compressed: None, } }; let mut faerie_module = new_module("some_file".to_string()); let mut debug = if tcx.sess.opts.debuginfo != DebugInfo::None // macOS debuginfo doesn't work yet (see #303) && !tcx.sess.target.target.options.is_like_osx { let debug = DebugContext::new( tcx, faerie_module.target_config().pointer_type().bytes() 
as u8, ); Some(debug) } else { None }; codegen_cgus(tcx, &mut faerie_module, &mut debug, log); tcx.sess.abort_if_errors(); let mut allocator_module = new_module("allocator_shim".to_string()); let created_alloc_shim = crate::allocator::codegen(tcx.sess, &mut allocator_module); rustc_incremental::assert_dep_graph(tcx); rustc_incremental::save_dep_graph(tcx); rustc_incremental::finalize_session_directory(tcx.sess, tcx.crate_hash(LOCAL_CRATE)); let metadata_module = if need_metadata_module { use rustc::mir::mono::CodegenUnitNameBuilder; let cgu_name_builder = &mut CodegenUnitNameBuilder::new(tcx); let metadata_cgu_name = cgu_name_builder .build_cgu_name(LOCAL_CRATE, &["crate"], Some("metadata")) .as_str() .to_string(); let mut metadata_artifact = faerie::Artifact::new( crate::build_isa(tcx.sess, true).triple().clone(), metadata_cgu_name.clone(), ); crate::metadata::write_metadata(tcx, &mut metadata_artifact); let tmp_file = tcx .output_filenames(LOCAL_CRATE) .temp_path(OutputType::Metadata, Some(&metadata_cgu_name)); let obj = metadata_artifact.emit().unwrap(); std::fs::write(&tmp_file, obj).unwrap(); Some(CompiledModule { name: metadata_cgu_name, kind: ModuleKind::Metadata, object: Some(tmp_file), bytecode: None, bytecode_compressed: None, }) } else { None }; Box::new(CodegenResults { crate_name: tcx.crate_name(LOCAL_CRATE), modules: vec![emit_module( ModuleKind::Regular, faerie_module, debug, )], allocator_module: if created_alloc_shim { Some(emit_module( ModuleKind::Allocator, allocator_module, None, )) } else { None }, metadata_module, crate_hash: tcx.crate_hash(LOCAL_CRATE), metadata, windows_subsystem: None, // Windows is not yet supported linker_info: LinkerInfo::new(tcx), crate_info: CrateInfo::new(tcx), }) } fn codegen_cgus<'tcx>( tcx: TyCtxt<'tcx>, module: &mut Module<impl Backend + 'static>, debug: &mut Option<DebugContext<'tcx>>, log: &mut Option<File>, ) { let (_, cgus) = tcx.collect_and_partition_mono_items(LOCAL_CRATE); let mono_items = cgus .iter() .map(|cgu| cgu.items_in_deterministic_order(tcx).into_iter()) .flatten() .collect::<FxHashMap
.unwrap(); codegen_cgus(tcx, &mut jit_module, &mut None, log); crate::allocator::codegen(tcx.sess, &mut jit_module);
random_line_split
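The `// TODO` in the run_jit code above notes that POSIX requires `argv[argc]` to be a NULL pointer even though the Rust-compiled `main` does not rely on it. A minimal, self-contained sketch of building the argument vector with that sentinel; the helper name `build_argv` and the sample arguments are mine, not part of the cranelift driver:

```rust
use std::ffi::CString;
use std::os::raw::{c_char, c_int};

// Hypothetical helper: collect pointers into the owned CStrings and append the
// NULL sentinel that POSIX expects at argv[argc].
fn build_argv(args: &[CString]) -> Vec<*const c_char> {
    let mut argv: Vec<*const c_char> = args.iter().map(|a| a.as_ptr()).collect();
    argv.push(std::ptr::null());
    argv
}

fn main() {
    let args: Vec<CString> = ["my_crate", "--flag"]
        .iter()
        .map(|s| CString::new(*s).unwrap())
        .collect();
    let argv = build_argv(&args);
    let argc = args.len() as c_int; // argc does not count the sentinel
    println!("argc = {}, argv slots = {} (last one is NULL)", argc, argv.len());
}
```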
driver.rs
run_aot(tcx, metadata, need_metadata_module, &mut log) } #[cfg(not(target_arch = "wasm32"))] fn run_jit(tcx: TyCtxt<'_>, log: &mut Option<File>) -> ! { use cranelift_simplejit::{SimpleJITBackend, SimpleJITBuilder}; let imported_symbols = load_imported_symbols_for_jit(tcx); let mut jit_builder = SimpleJITBuilder::with_isa( crate::build_isa(tcx.sess, false), cranelift_module::default_libcall_names(), ); jit_builder.symbols(imported_symbols); let mut jit_module: Module<SimpleJITBackend> = Module::new(jit_builder); assert_eq!(pointer_ty(tcx), jit_module.target_config().pointer_type()); let sig = Signature { params: vec![ AbiParam::new(jit_module.target_config().pointer_type()), AbiParam::new(jit_module.target_config().pointer_type()), ], returns: vec![AbiParam::new( jit_module.target_config().pointer_type(), /*isize*/ )], call_conv: CallConv::SystemV, }; let main_func_id = jit_module .declare_function("main", Linkage::Import, &sig) .unwrap(); codegen_cgus(tcx, &mut jit_module, &mut None, log); crate::allocator::codegen(tcx.sess, &mut jit_module); jit_module.finalize_definitions(); tcx.sess.abort_if_errors(); let finalized_main: *const u8 = jit_module.get_finalized_function(main_func_id); println!("Rustc codegen cranelift will JIT run the executable, because the SHOULD_RUN env var is set"); let f: extern "C" fn(c_int, *const *const c_char) -> c_int = unsafe { ::std::mem::transmute(finalized_main) }; let args = ::std::env::var("JIT_ARGS").unwrap_or_else(|_| String::new()); let args = args .split(" ") .chain(Some(&*tcx.crate_name(LOCAL_CRATE).as_str().to_string())) .map(|arg| CString::new(arg).unwrap()) .collect::<Vec<_>>(); let argv = args.iter().map(|arg| arg.as_ptr()).collect::<Vec<_>>(); // TODO: Rust doesn't care, but POSIX argv has a NULL sentinel at the end let ret = f(args.len() as c_int, argv.as_ptr()); jit_module.finish(); std::process::exit(ret); } fn load_imported_symbols_for_jit(tcx: TyCtxt<'_>) -> Vec<(String, *const u8)> { use rustc::middle::dependency_format::Linkage; let mut dylib_paths = Vec::new(); let crate_info = CrateInfo::new(tcx); let formats = tcx.sess.dependency_formats.borrow(); let data = formats.get(&CrateType::Executable).unwrap(); for &(cnum, _) in &crate_info.used_crates_dynamic { let src = &crate_info.used_crate_source[&cnum]; match data[cnum.as_usize() - 1] { Linkage::NotLinked | Linkage::IncludedFromDylib => {} Linkage::Static => { let name = tcx.crate_name(cnum); let mut err = tcx .sess .struct_err(&format!("Can't load static lib {}", name.as_str())); err.note("rustc_codegen_cranelift can only load dylibs in JIT mode."); err.emit(); } Linkage::Dynamic => { dylib_paths.push(src.dylib.as_ref().unwrap().0.clone()); } } } let mut imported_symbols = Vec::new(); for path in dylib_paths { use object::Object; let lib = libloading::Library::new(&path).unwrap(); let obj = std::fs::read(path).unwrap(); let obj = object::File::parse(&obj).unwrap(); imported_symbols.extend(obj.dynamic_symbols().filter_map(|(_idx, symbol)| { let name = symbol.name().unwrap().to_string(); if name.is_empty() || !symbol.is_global() || symbol.is_undefined() { return None; } let symbol: libloading::Symbol<*const u8> = unsafe { lib.get(name.as_bytes()) }.unwrap(); Some((name, *symbol)) })); std::mem::forget(lib) } tcx.sess.abort_if_errors(); imported_symbols } fn run_aot( tcx: TyCtxt<'_>, metadata: EncodedMetadata, need_metadata_module: bool, log: &mut Option<File>, ) -> Box<CodegenResults> { let new_module = |name: String| { let module: Module<FaerieBackend> = Module::new( FaerieBuilder::new( 
crate::build_isa(tcx.sess, true), name + ".o", FaerieTrapCollection::Disabled, cranelift_module::default_libcall_names(), ) .unwrap(), ); assert_eq!(pointer_ty(tcx), module.target_config().pointer_type()); module }; let emit_module = |kind: ModuleKind, mut module: Module<FaerieBackend>, debug: Option<DebugContext>| { module.finalize_definitions(); let mut artifact = module.finish().artifact; if let Some(mut debug) = debug { debug.emit(&mut artifact); } let tmp_file = tcx .output_filenames(LOCAL_CRATE) .temp_path(OutputType::Object, Some(&artifact.name)); let obj = artifact.emit().unwrap(); std::fs::write(&tmp_file, obj).unwrap(); CompiledModule { name: artifact.name, kind, object: Some(tmp_file), bytecode: None, bytecode_compressed: None, } }; let mut faerie_module = new_module("some_file".to_string()); let mut debug = if tcx.sess.opts.debuginfo != DebugInfo::None // macOS debuginfo doesn't work yet (see #303) && !tcx.sess.target.target.options.is_like_osx { let debug = DebugContext::new( tcx, faerie_module.target_config().pointer_type().bytes() as u8, ); Some(debug) } else { None }; codegen_cgus(tcx, &mut faerie_module, &mut debug, log); tcx.sess.abort_if_errors(); let mut allocator_module = new_module("allocator_shim".to_string()); let created_alloc_shim = crate::allocator::codegen(tcx.sess, &mut allocator_module); rustc_incremental::assert_dep_graph(tcx); rustc_incremental::save_dep_graph(tcx); rustc_incremental::finalize_session_directory(tcx.sess, tcx.crate_hash(LOCAL_CRATE)); let metadata_module = if need_metadata_module { use rustc::mir::mono::CodegenUnitNameBuilder; let cgu_name_builder = &mut CodegenUnitNameBuilder::new(tcx); let metadata_cgu_name = cgu_name_builder .build_cgu_name(LOCAL_CRATE, &["crate"], Some("metadata")) .as_str() .to_string(); let mut metadata_artifact = faerie::Artifact::new( crate::build_isa(tcx.sess, true).triple().clone(), metadata_cgu_name.clone(), ); crate::metadata::write_metadata(tcx, &mut metadata_artifact); let tmp_file = tcx .output_filenames(LOCAL_CRATE) .temp_path(OutputType::Metadata, Some(&metadata_cgu_name)); let obj = metadata_artifact.emit().unwrap(); std::fs::write(&tmp_file, obj).unwrap(); Some(CompiledModule { name: metadata_cgu_name, kind: ModuleKind::Metadata, object: Some(tmp_file), bytecode: None, bytecode_compressed: None, }) } else { None }; Box::new(CodegenResults { crate_name: tcx.crate_name(LOCAL_CRATE), modules: vec![emit_module( ModuleKind::Regular, faerie_module, debug, )], allocator_module: if created_alloc_shim { Some(emit_module( ModuleKind::Allocator, allocator_module, None, )) } else { None }, metadata_module, crate_hash: tcx.crate_hash(LOCAL_CRATE), metadata, windows_subsystem: None, // Windows is not yet supported linker_info: LinkerInfo::new(tcx), crate_info: CrateInfo::new(tcx), }) } fn codegen_cgus<'tcx>( tcx: TyCtxt<'tcx>, module: &mut Module<impl Backend + 'static>, debug: &mut Option<DebugContext<'tcx>>, log: &mut Option<File>, ) { let (_, cgus) = tcx.collect_and_partition_mono_items(LOCAL_CRATE); let mono
{ #[cfg(not(target_arch = "wasm32"))] let _: ! = run_jit(tcx, &mut log); #[cfg(target_arch = "wasm32")] panic!("jit not supported on wasm"); }
conditional_block
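Both copies of run_jit end the same way: the `*const u8` returned by `get_finalized_function` is transmuted to the C-ABI signature of `main` and called. The same round trip can be shown with an ordinary Rust function standing in for the JIT-compiled code; the `fake_main` name and the `argc + 41` arithmetic are illustrative only:

```rust
use std::os::raw::{c_char, c_int};

extern "C" fn fake_main(argc: c_int, _argv: *const *const c_char) -> c_int {
    argc + 41
}

fn main() {
    // Coerce to a function pointer, erase it to a raw pointer (what the JIT hands
    // back), then transmute it back to the callable signature, as run_jit does.
    let fp: extern "C" fn(c_int, *const *const c_char) -> c_int = fake_main;
    let raw: *const u8 = fp as *const u8;
    let f: extern "C" fn(c_int, *const *const c_char) -> c_int =
        unsafe { std::mem::transmute(raw) };
    assert_eq!(f(1, std::ptr::null()), 42);
}
```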
server.js
3_7393515027707347534_n.jpg?_nc_ht=scontent-frt3-2.cdninstagram.com", * "__v": 0, * "createdAt": 1558791065, * "group": "memes", * "mediaSource": "instagram", * "mediaSourceUri": "officialsoccermemes", * "profileAvatar": "https://scontent-frt3-2.cdninstagram.com/vp/92d40c8cf638de53860991dcba9203f7/5D6788E3/t51.2885-19/s150x150/28764502_1936656559982894_8291188198877429760_n.jpg?_nc_ht=scontent-frt3-2.cdninstagram.com", * "profileFullName": "Soccer Memes", * "text": "Only elites remember this derby..", * "type": "photo" * } * ] * * @apiSuccessExample Success-Response(players): * HTTP/1.1 200 OK * [ * { * "_id": "5ce96b65b5062e6bacc4fa7d", * "url": "https://scontent-frt3-2.cdninstagram.com/vp/73265ce00cc37d36c995eb5e2e361d21/5D869611/t51.2885-15/sh0.08/e35/c0.179.1440.1440a/s640x640/60455430_110022136781972_1050194925618692924_n.jpg?_nc_ht=scontent-frt3-2.cdninstagram.com", * "__v": 0, * "createdAt": 1558792059, * "group": "players", * "mediaSource": "instagram", * "mediaSourceUri": "toni.kr8s", * "profileAvatar": "https://scontent-frt3-2.cdninstagram.com/vp/21b37856c41b4b0cebd99114eeec4e93/5D9B13CC/t51.2885-19/s150x150/22802098_503478856676105_1612933203750813696_n.jpg?_nc_ht=scontent-frt3-2.cdninstagram.com", * "profileFullName": "Toni Kroos", * "text": "Coming sooooooon! Very excited to present you the cover of my movie KROOS which will be released in Germany on July 4th! Love it! You too? // Ich freue mich sehr, euch das offizielle Cover zum Film KROOS zeigen zu dürfen, der am 4. Juli ins Kino kommt. Gefällt es euch ?", * "type": "photo" * }, * { * "_id": "5ce92102c52f963c3b18fd48", * "url": "https://scontent.cdninstagram.com/vp/4adde10d76c05734900e4146e0cf53ac/5CEB37FE/t50.2886-16/61073300_2540740436153145_8306512685139230720_n.mp4?_nc_ht=scontent.cdninstagram.com", * "__v": 0, * "createdAt": 1558731402, * "group": "players", * "mediaSource": "instagram", * "mediaSourceUri": "karimbenzema", * "profileAvatar": "https://scontent-frt3-2.cdninstagram.com/vp/be13818693e5eca9e3bfbcab0e4370e3/5D9BC87F/t51.2885-19/s150x150/49933498_368802787006598_1203420445877993472_n.jpg?_nc_ht=scontent-frt3-2.cdninstagram.com", * "profileFullName": "Karim Benzema", * "text": "Nueve", * "type": "video" * } * ] * * @apiErrorExample Error-Response: * HTTP/1.1 422 Unprocessable entity * * @apiErrorExample Error-Response: * HTTP/1.1 500 Internal server error */ app.get('/media/content', [ check('group').custom(value => { return value === 'memes' || value === 'players' ? true : false; }), check('page').optional().isInt({min: 0}) ], handleHttpValidationErrors, async (req, res) => { const page = +req.query.page || 0; const mediaPosts = await MediaContent.find({group: req.query.group}).skip(config.ITEMS_PER_PAGE * page).limit(config.ITEMS_PER_PAGE).sort('-createdAt').catch(handleDbError(res)); await setPaginationHeaders(res, MediaContent, {group: req.query.group}, page); res.send(mediaPosts); }); /** * @api {post} /memes Upload meme * @apiDescription Uploads an image with a meme * @apiVersion 1.0.0 * @apiName UploadMeme * @apiGroup Memes * * @apiParam {File} imageFile Image file with a meme. Available extensions: jpeg, png. Max file size: 5MB. 
* * @apiSuccessExample Success-Response: * HTTP/1.1 200 OK * File uploaded * * @apiErrorExample Error-Response: * HTTP/1.1 400 Bad Request * * @apiErrorExample Error-Response: * HTTP/1.1 500 Internal server error */ app.post('/memes', async (req, res) => { // validation if (!req.files) return res.status(400).send('Image file is not found in the request'); if(!req.files.imageFile) return res.status(400).send('Invalid image param name. Use: imageFile'); if(req.files.imageFile.size > 5 * 1024 * 1024) return res.status(400).send('File too large. Max size: 5MB'); if(!['image/jpeg','image/png'].includes(req.files.imageFile.mimetype)) return res.status(400).send('Invalid image extension. Available: jpeg, png'); // upload file const filePrefix = Math.trunc(new Date().getTime() / 1000) + '_' + Math.random().toString(36).substring(2, 15) + Math.random().toString(36).substring(2, 15); const filePath = `${__dirname}/../public/memes/${filePrefix}_${req.files.imageFile.name}`; req.files.imageFile.mv(filePath, (err) => { if (err) return res.status(500).send(err); res.send('File uploaded'); }); }); app.listen(config.SERVER_PORT, () => console.log(`backend listening on port ${config.SERVER_PORT}`)); /** * Helper methods */ /** * Handles MongoDB error * @param {Object} res Express response object */ function handleDbError(res) { return (err) => { res.status(500).json(err); }; } /** * Handles http param validation * @param {Object} req Express request object * @param {Object} res Express response object * @param {Function} next Express next function */ function handleHttpValidationErrors(req, res, next) { const errors = validationResult(req); if(!errors.isEmpty()) { res.status(422).json({errors: errors.array()}); } else { next(); } } /** * Adds http pagination headers to server response * @param {Object} res Express response object * @param {Object} model Mongoose model * @param {Object} totalCountCondition Condtition to get total number of records for model * @param {Number} page Current page * @returns {Object} Express response object */ async function setPagina
tionHeaders(res, mod
identifier_name
server.js
instagram.com/vp/9b249cc679a62edd00eed68a563f89ed/5D7D74C1/t51.2885-15/e35/60600023_2336658259943893_7393515027707347534_n.jpg?_nc_ht=scontent-frt3-2.cdninstagram.com", * "__v": 0, * "createdAt": 1558791065, * "group": "memes", * "mediaSource": "instagram", * "mediaSourceUri": "officialsoccermemes", * "profileAvatar": "https://scontent-frt3-2.cdninstagram.com/vp/92d40c8cf638de53860991dcba9203f7/5D6788E3/t51.2885-19/s150x150/28764502_1936656559982894_8291188198877429760_n.jpg?_nc_ht=scontent-frt3-2.cdninstagram.com", * "profileFullName": "Soccer Memes", * "text": "Only elites remember this derby..", * "type": "photo" * } * ] * * @apiSuccessExample Success-Response(players): * HTTP/1.1 200 OK * [ * { * "_id": "5ce96b65b5062e6bacc4fa7d", * "url": "https://scontent-frt3-2.cdninstagram.com/vp/73265ce00cc37d36c995eb5e2e361d21/5D869611/t51.2885-15/sh0.08/e35/c0.179.1440.1440a/s640x640/60455430_110022136781972_1050194925618692924_n.jpg?_nc_ht=scontent-frt3-2.cdninstagram.com", * "__v": 0, * "createdAt": 1558792059, * "group": "players", * "mediaSource": "instagram", * "mediaSourceUri": "toni.kr8s", * "profileAvatar": "https://scontent-frt3-2.cdninstagram.com/vp/21b37856c41b4b0cebd99114eeec4e93/5D9B13CC/t51.2885-19/s150x150/22802098_503478856676105_1612933203750813696_n.jpg?_nc_ht=scontent-frt3-2.cdninstagram.com", * "profileFullName": "Toni Kroos", * "text": "Coming sooooooon! Very excited to present you the cover of my movie KROOS which will be released in Germany on July 4th! Love it! You too? // Ich freue mich sehr, euch das offizielle Cover zum Film KROOS zeigen zu dürfen, der am 4. Juli ins Kino kommt. Gefällt es euch ?", * "type": "photo" * }, * { * "_id": "5ce92102c52f963c3b18fd48", * "url": "https://scontent.cdninstagram.com/vp/4adde10d76c05734900e4146e0cf53ac/5CEB37FE/t50.2886-16/61073300_2540740436153145_8306512685139230720_n.mp4?_nc_ht=scontent.cdninstagram.com", * "__v": 0, * "createdAt": 1558731402, * "group": "players", * "mediaSource": "instagram", * "mediaSourceUri": "karimbenzema", * "profileAvatar": "https://scontent-frt3-2.cdninstagram.com/vp/be13818693e5eca9e3bfbcab0e4370e3/5D9BC87F/t51.2885-19/s150x150/49933498_368802787006598_1203420445877993472_n.jpg?_nc_ht=scontent-frt3-2.cdninstagram.com", * "profileFullName": "Karim Benzema", * "text": "Nueve", * "type": "video" * } * ] * * @apiErrorExample Error-Response: * HTTP/1.1 422 Unprocessable entity * * @apiErrorExample Error-Response: * HTTP/1.1 500 Internal server error */ app.get('/media/content', [ check('group').custom(value => { return value === 'memes' || value === 'players' ? true : false; }), check('page').optional().isInt({min: 0}) ], handleHttpValidationErrors, async (req, res) => { const page = +req.query.page || 0; const mediaPosts = await MediaContent.find({group: req.query.group}).skip(config.ITEMS_PER_PAGE * page).limit(config.ITEMS_PER_PAGE).sort('-createdAt').catch(handleDbError(res)); await setPaginationHeaders(res, MediaContent, {group: req.query.group}, page); res.send(mediaPosts); }); /** * @api {post} /memes Upload meme * @apiDescription Uploads an image with a meme * @apiVersion 1.0.0 * @apiName UploadMeme * @apiGroup Memes * * @apiParam {File} imageFile Image file with a meme. Available extensions: jpeg, png. Max file size: 5MB. 
* * @apiSuccessExample Success-Response: * HTTP/1.1 200 OK * File uploaded * * @apiErrorExample Error-Response: * HTTP/1.1 400 Bad Request * * @apiErrorExample Error-Response: * HTTP/1.1 500 Internal server error */ app.post('/memes', async (req, res) => { // validation if (!req.files) return res.status(400).send('Image file is not found in the request'); if(!req.files.imageFile) return res.status(400).send('Invalid image param name. Use: imageFile'); if(req.files.imageFile.size > 5 * 1024 * 1024) return res.status(400).send('File too large. Max size: 5MB'); if(!['image/jpeg','image/png'].includes(req.files.imageFile.mimetype)) return res.status(400).send('Invalid image extension. Available: jpeg, png'); // upload file const filePrefix = Math.trunc(new Date().getTime() / 1000) + '_' + Math.random().toString(36).substring(2, 15) + Math.random().toString(36).substring(2, 15); const filePath = `${__dirname}/../public/memes/${filePrefix}_${req.files.imageFile.name}`; req.files.imageFile.mv(filePath, (err) => { if (err) return res.status(500).send(err); res.send('File uploaded'); }); }); app.listen(config.SERVER_PORT, () => console.log(`backend listening on port ${config.SERVER_PORT}`)); /** * Helper methods */ /** * Handles MongoDB error * @param {Object} res Express response object */ function handleDbError(res) { return (err) => { res.status(500).json(err); }; } /** * Handles http param validation * @param {Object} req Express request object * @param {Object} res Express response object * @param {Function} next Express next function */ function handleHttpValidationErrors(req, res, next) { const
errors = validationResult(req); if(!errors.isEmpty()) { res.status(422).json({errors: errors.array()}); } else { next(); } } /** *
identifier_body
server.js
"country": "France", * "country_code": "FR", * "name": "Ligue 1", * "season": "2018", * "season_end": "2019-05-25", * "season_start": "2018-08-10" * } * } * ] * * @apiErrorExample Error-Response: * HTTP/1.1 422 Unprocessable entity * * @apiErrorExample Error-Response: * HTTP/1.1 500 Internal server error */ app.get('/fixtures', [ check('from').optional({checkFalsy: true}).isInt(), check('to').optional({checkFalsy: true}).isInt() ], handleHttpValidationErrors, async (req, res) => { const from = req.query.from || moment().subtract(6, 'hours').unix(); const to = req.query.to || moment().add(2, 'days').unix(); const fixtures = await Fixture.find({event_timestamp: {$gte: from, $lt: to}}).populate('league').catch(handleDbError(res)); res.send(fixtures); }); /** * @api {get} /leagues Get leagues * @apiDescription Returns leagues. By default returns leagues which are not finished yet.
* @apiSuccessExample Success-Response: * [ * { * "_id": "5c81b165986149a3f58060e9", * "id": 289, * "__v": 0, * "country": "Australia", * "country_code": "AU", * "name": "National Premier Leagues", * "season": "2019", * "season_end": "2019-08-18", * "season_start": "2019-04-05", * "telegram_group_name": "Australia National Premier Leagues FansInTears", * "telegram_invite_link": "https://t.me/joinchat/GdDWTRDcP-XkV8--tdCTNg", * "logo": "https://www.api-football.com/public/leagues/289.png" * }, * { * "_id": "5c81b165986149a3f5806157", * "id": 344, * "__v": 0, * "country": "Peru", * "country_code": "PE", * "name": "Primera Division", * "season": "2019", * "season_end": "2019-11-23", * "season_start": "2019-02-16", * "telegram_group_name": "Peru Primera Division FansInTears", * "telegram_invite_link": "https://t.me/joinchat/GdDWTRaNOaKZwnk17ghPCA", * "logo": "" * } * ] * * @apiErrorExample Error-Response: * HTTP/1.1 500 Internal server error */ app.get('/leagues', async (req, res) => { const today = moment().format('YYYY-MM-DD'); const leagues = await League.find({season_end: {$gte: today}}).sort('country').catch(handleDbError(res)); res.send(leagues); }); /** * @api {get} /media/content Get media content * @apiDescription Returns media content by group * @apiVersion 1.0.0 * @apiName GetMediaContent * @apiGroup MediaContent * * @apiParam {String} group Media content group. Available values: 'memes' and 'players'. * @apiParam {Number} [page] Page number. Default: 0. * * @apiHeader {String} X-Total-Count Response header. Total number of records. * @apiHeader {String} X-Limit Response header. Number of records per page. * @apiHeader {String} X-Page-Last Response header. Index of the past page. Pagination starts from page with index 0. * @apiHeader {String} X-Page Response header. Index of the current page. Pagination starts from page with index 0. 
* * @apiSuccessExample Success-Response(memes): * HTTP/1.1 200 OK * [ * { * "_id": "5ce96b65b5062e6bacc4f8e4", * "url": "https://pbs.twimg.com/media/D7bH6i_UIAAWH8e.jpg", * "__v": 0, * "createdAt": 1558799247, * "group": "memes", * "mediaSource": "twitter", * "mediaSourceUri": "TrollFootball", * "profileAvatar": null, * "profileFullName": "TrollFootball", * "text": "OFFICIAL: Pep Guardiola unveiled as the new Juventus manager", * "type": "photo" * }, * { * "_id": "5ce96b65b5062e6bacc4f93d", * "url": "https://scontent.cdninstagram.com/vp/166819bcf2c47256785091fb5ee19f6a/5CEBFCCA/t50.2886-16/61727911_2713987785282562_2444701657934196060_n.mp4?_nc_ht=scontent.cdninstagram.com", * "__v": 0, * "createdAt": 1558791455, * "group": "memes", * "mediaSource": "instagram", * "mediaSourceUri": "footballmemesinsta", * "profileAvatar": "https://scontent-frt3-2.cdninstagram.com/vp/5150f7c3f30fa499375ce0cf2ba49232/5D689416/t51.2885-19/s150x150/53687215_311095812887779_1851112225763229696_n.jpg?_nc_ht=scontent-frt3-2.cdninstagram.com", * "profileFullName": "Football • Soccer • Fútbol", * "text": "This is the best thing you’ll se all weekend", * "type": "video" * }, * { * "_id": "5ce96b65b5062e6bacc4f965", * "url": "https://scontent-frt3-2.cdninstagram.com/vp/9b249cc679a62edd00eed68a563f89ed/5D7D74C1/t51.2885-15/e35/60600023_2336658259943893_7393515027707347534_n.jpg?_nc_ht=scontent-frt3-2.cdninstagram.com", * "__v": 0, * "createdAt": 1558791065, * "group": "memes", * "mediaSource": "instagram", * "mediaSourceUri": "officialsoccermemes", * "profileAvatar": "https://scontent-frt3-2.cdninstagram.com/vp/92d40c8cf638de53860991dcba9203f7/5D6788E3/t51.2885-19/s150x150/28764502_1936656559982894_8291188198877429760_n.jpg?_nc_ht=scontent-frt3-2.cdninstagram.com", * "profileFullName": "Soccer Memes", * "text": "Only elites remember this derby..", * "type": "photo" * } * ] * * @apiSuccessExample Success-Response(players): * HTTP/1.1 200 OK * [ * { * "_id": "5ce96b65b5062e6bacc4fa7d", * "url": "https://scontent-frt3-2.cdninstagram.com
* @apiVersion 1.0.0 * @apiName GetLeagues * @apiGroup League *
random_line_split
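The pagination headers documented for GET /media/content (X-Total-Count, X-Limit, X-Page, X-Page-Last) imply one small piece of arithmetic that setPaginationHeaders has to perform. Its body is not shown in this dump, so the following is only a hypothetical reconstruction of the last-page computation, assuming zero-indexed pages and a per-page limit of config.ITEMS_PER_PAGE:

```rust
// Hypothetical reconstruction: index of the last page for zero-indexed pagination.
fn last_page_index(total_count: u64, items_per_page: u64) -> u64 {
    if total_count == 0 {
        0
    } else {
        (total_count + items_per_page - 1) / items_per_page - 1 // ceil(total/limit) - 1
    }
}

fn main() {
    // 45 matching documents with ITEMS_PER_PAGE = 20 fill pages 0 and 1 and
    // leave 5 documents on page 2, so X-Page-Last would be 2.
    assert_eq!(last_page_index(45, 20), 2);
    assert_eq!(last_page_index(20, 20), 0);
}
```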
asterix_utils.py
_2.xml', 252: 'asterix_cat252_7_0.xml'} # , #252: 'asterix_cat252_6_1.xml'} def load_asterix_category_format(k): """ Return a Document object representing the content of the document from the given input. Args: k (int): The ASTERIX category. Returns: xml.dom.minidom: The Document Object Model interface. """ global filenames try: __basePath__ = os.path.abspath(os.path.join(os.getcwd(), '../../../..')) # Look for file in current executing directory path_filename1 = filenames[k] # On default directory (absolute) path_filename2 = __basePath__ + "/" +filenames[k] # On default directory (relative) path_filename3 = os.path.dirname(os.path.realpath(__file__)) + "/xml/" + filenames[k] if os.path.isfile(path_filename1): # print "Loading file '%s'" % path_filename1 return minidom.parse(path_filename1) if os.path.isfile(path_filename2): # print "Loading file '%s'" % path_filename2 return minidom.parse(path_filename2) if os.path.isfile(path_filename3): # print "Loading file '%s'" % path_filename3 return minidom.parse(path_filename3) return None except: traceback.print_exc() return None def encode(asterix): """ Encodes a dictionary (asterix) in the EUROCONTROL ASTERIX category. Args: asterix (dict): A dictionary with data block of ASTERIX category. Returns: asterix_record (buffer): Data block buffer. """ assert type(asterix) is dict asterix_record = 0 #priority_asterix_cat = [21, 34] for k, v in asterix.iteritems(): #for k in priority_asterix_cat: v = asterix[k] record = 0 n_octets_data_record = 0 cat = 0 ctf = load_asterix_category_format(k) if ctf is None: continue if verbose >= 1: print 'encoding cat', k cat = k for cat_tree in ctf.getElementsByTagName('Category'): if k != int(cat_tree.getAttribute('id')): continue for data_record in v: ll_db, db = encode_category(k, data_record, cat_tree) #TODO: use maximum datablock size record <<= ll_db * 8 record += db n_octets_data_record += ll_db if verbose >= 1: print "Tamanho do bloco de dados ", ll_db break # Record header ( CAT + LEN ) record += (cat << (n_octets_data_record * 8 + 16)) record += ((1 + 2 + n_octets_data_record) << ((n_octets_data_record) * 8)) asterix_record <<= (1 + 2 + n_octets_data_record) * 8 asterix_record += record return asterix_record def encode_category(cat, did, tree): """ Encodes the record from the given category (cat). Args: cat (int): The given category. did (dict): The dictionary with data to encode. tree (Document object): The specification for ASTERIX category. Returns: (n_octets_data_record, data_record) (tuples): The caetgory record size and record. 
""" if did == {}: return 0, 0 mdi = {} for c in tree.getElementsByTagName('DataItem'): di = c.getAttribute('id') if di.isdigit(): di = int(di) rule = c.getAttribute('rule') if di in did: if verbose >= 1: print 'encoding dataitem', di l, v = encode_dataitem(did[di], c) mdi[di] = l, v else: if rule == 'mandatory' and verbose >= 1: print 'absent mandatory dataitem', di data_record = 0L n_octets_data_record = 0 sorted_mdi_keys = sorted(mdi.keys()) fspec_bits = [] uap_tree = tree.getElementsByTagName('UAP')[0] for cn in uap_tree.childNodes: if cn.nodeName != 'UAPItem': continue uapi_value = cn.firstChild.nodeValue if uapi_value.isdigit(): uapi_value = int(uapi_value) if uapi_value in sorted_mdi_keys: fspec_bits.append(int(cn.getAttribute('bit'))) l, v = mdi[uapi_value] data_record <<= l * 8 data_record += v n_octets_data_record += l if fspec_bits == []: print 'no dataitems identified' return 0, 0 # FSPEC for data record max_bit = max(fspec_bits) n_octets_fspec = max_bit / 8 + 1 # Fn fspec = 0 for i in fspec_bits: fspec += (1 << (n_octets_fspec * 8 - 1 - i)) # FX for i in range(n_octets_fspec - 1): fspec += (1 << ((n_octets_fspec - 1 - i) * 8)) data_record += (fspec << (n_octets_data_record * 8)) n_octets_data_record += n_octets_fspec return n_octets_data_record, data_record def encode_dataitem(dfd, tree): """Returns the encoded Data Item. Encodes the Data Item in the data field of record according to the rules defined in the XML file. Args: dfd (dict): The dictionary with Data Item values. tree (Document object): The specification for ASTERIX category. Returns: (length, value) (tuples): The Data Field size and Data Field. """ assert type(dfd) is dict or type(dfd) is list for c in tree.getElementsByTagName('DataItemFormat'): for d in c.childNodes: if d.nodeName == 'Fixed': return encode_fixed(dfd, d) else: if d.nodeName == 'Variable': return encode_variable(dfd, d) else: if d.nodeName == 'Repetitive': return encode_repetitive(dfd, d) else: if d.nodeName == 'Compound': return encode_compound(dfd, d) def encode_fixed(bd, tree): """ Returns the encoded Data Item as a fixed length Data Field. Args: dfd (dict): The dictionary with Data Item values. tree (Document object): The rules to encode Data Item. Returns: (length, value) (tuples): The Data Field size and Data Field. """ length = int(tree.getAttribute('length')) value = 0 has_encoded = False for cn in tree.childNodes: if cn.nodeName != 'Bits': continue key = cn.getElementsByTagName('BitsShortName')[0].firstChild.nodeValue bits_unit = cn.getElementsByTagName('BitsUnit') if key in bd and key != 'FX': has_encoded = True assert (cn.getAttribute('bit') == '' and (cn.getAttribute('from') != '' and cn.getAttribute('to') != '')) or (cn.getAttribute('bit') != '' and (cn.getAttribute('from') == '' and cn.getAttribute('to') == '')) bit_ = cn.getAttribute('bit') if bit_ != '': bit_ = int(bit_) shift_left = bit_ - 1 mask = 0x1 else: from_ = int(cn.getAttribute('from')) to_ = int(cn.getAttribute('to')) if from_ < to_: # swap values x = to_ to_ = from_ from_ = x shift_left = to_ - 1 mask = (1 << (from_ - to_ + 1)) - 1 v = bd[key] if len(bits_unit): scale = bits_unit[0].getAttribute('scale') v = int(v / float(scale)) #TODO: consider 'encode' attr value += ((v & mask) << shift_left) else: if key != 'FX' and verbose >= 2: print 'field', key, 'absent in input' if has_encoded is False: return 0, 0 return length, value def encode_variable(db, tree):
""" Returns the encoded Data Item as a variable length Data Field. Args: dfd (dict): The dictionary with Data Item values. tree (Document object): The rules to encode Data Item. Returns: (length, value) (tuples): The Data Field size and Data Field. """ variable = None length = 0 for cn in tree.childNodes: if cn.nodeName == 'Fixed': l, v = encode_fixed(db, cn) assert l <= 1 if l > 0: if v % 2 == 1: # remove FX v -= 1
identifier_body
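In encode_category above, the FSPEC for a record is assembled by setting one bit per present data item (bit 0 being the most significant bit of the first FSPEC octet) and then setting the FX continuation bit, the least significant bit, of every octet except the last. A standalone sketch of that packing; the helper name and the sample UAP bit numbers are made up for illustration:

```rust
// Illustrative sketch of the FSPEC packing performed by encode_category().
fn build_fspec(uap_bits: &[u32]) -> Vec<u8> {
    let max_bit = *uap_bits.iter().max().expect("at least one data item present");
    let n_octets = (max_bit / 8 + 1) as usize;
    let mut fspec = vec![0u8; n_octets];
    for &bit in uap_bits {
        fspec[(bit / 8) as usize] |= 0x80 >> (bit % 8); // bit 0 = MSB of octet 0
    }
    for octet in &mut fspec[..n_octets - 1] {
        *octet |= 0x01; // FX bit: another FSPEC octet follows
    }
    fspec
}

fn main() {
    // Data items on UAP bits 0, 2 and 9 need two FSPEC octets.
    assert_eq!(build_fspec(&[0, 2, 9]), vec![0b1010_0001, 0b0100_0000]);
}
```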
asterix_utils.py
', 10: 'asterix_cat010_1_1.xml', 19: 'asterix_cat019_1_2.xml', 20: 'asterix_cat020_1_7.xml', #21: 'asterix_cat021_0_23.xml', 21: 'asterix_cat021_0_26.xml', #21: 'asterix_cat021_1_8.xml', 23: 'asterix_cat023_0_13.xml', 30: 'asterix_cat030_6_2.xml', 31: 'asterix_cat031_6_2.xml', #32: 'asterix_cat032_6_2.xml', 32: 'asterix_cat032_7_0.xml', 34: 'asterix_cat034_1_26.xml', 48: 'asterix_cat048_1_14.xml', #62: 'asterix_cat062_0_17.xml', #62: 'asterix_cat062_1_9.xml', 62: 'asterix_cat062_1_16.xml', #62: 'asterix_cat062_1_7.xml', 63: 'asterix_cat063_1_3.xml', 65: 'asterix_cat065_1_3.xml', #65:'asterix_cat065_1_2.xml', 242: 'asterix_cat242_1_0.xml', #252: 'asterix_cat252_6_2.xml', 252: 'asterix_cat252_7_0.xml'} # , #252: 'asterix_cat252_6_1.xml'} def load_asterix_category_format(k): """ Return a Document object representing the content of the document from the given input. Args: k (int): The ASTERIX category. Returns: xml.dom.minidom: The Document Object Model interface. """ global filenames try: __basePath__ = os.path.abspath(os.path.join(os.getcwd(), '../../../..')) # Look for file in current executing directory path_filename1 = filenames[k] # On default directory (absolute) path_filename2 = __basePath__ + "/" +filenames[k] # On default directory (relative) path_filename3 = os.path.dirname(os.path.realpath(__file__)) + "/xml/" + filenames[k] if os.path.isfile(path_filename1): # print "Loading file '%s'" % path_filename1 return minidom.parse(path_filename1) if os.path.isfile(path_filename2): # print "Loading file '%s'" % path_filename2 return minidom.parse(path_filename2) if os.path.isfile(path_filename3): # print "Loading file '%s'" % path_filename3 return minidom.parse(path_filename3) return None except: traceback.print_exc() return None def encode(asterix): """ Encodes a dictionary (asterix) in the EUROCONTROL ASTERIX category. Args: asterix (dict): A dictionary with data block of ASTERIX category. Returns: asterix_record (buffer): Data block buffer. """ assert type(asterix) is dict asterix_record = 0 #priority_asterix_cat = [21, 34] for k, v in asterix.iteritems(): #for k in priority_asterix_cat: v = asterix[k] record = 0 n_octets_data_record = 0 cat = 0 ctf = load_asterix_category_format(k) if ctf is None: continue if verbose >= 1:
cat = k for cat_tree in ctf.getElementsByTagName('Category'): if k != int(cat_tree.getAttribute('id')): continue for data_record in v: ll_db, db = encode_category(k, data_record, cat_tree) #TODO: use maximum datablock size record <<= ll_db * 8 record += db n_octets_data_record += ll_db if verbose >= 1: print "Tamanho do bloco de dados ", ll_db break # Record header ( CAT + LEN ) record += (cat << (n_octets_data_record * 8 + 16)) record += ((1 + 2 + n_octets_data_record) << ((n_octets_data_record) * 8)) asterix_record <<= (1 + 2 + n_octets_data_record) * 8 asterix_record += record return asterix_record def encode_category(cat, did, tree): """ Encodes the record from the given category (cat). Args: cat (int): The given category. did (dict): The dictionary with data to encode. tree (Document object): The specification for ASTERIX category. Returns: (n_octets_data_record, data_record) (tuples): The caetgory record size and record. """ if did == {}: return 0, 0 mdi = {} for c in tree.getElementsByTagName('DataItem'): di = c.getAttribute('id') if di.isdigit(): di = int(di) rule = c.getAttribute('rule') if di in did: if verbose >= 1: print 'encoding dataitem', di l, v = encode_dataitem(did[di], c) mdi[di] = l, v else: if rule == 'mandatory' and verbose >= 1: print 'absent mandatory dataitem', di data_record = 0L n_octets_data_record = 0 sorted_mdi_keys = sorted(mdi.keys()) fspec_bits = [] uap_tree = tree.getElementsByTagName('UAP')[0] for cn in uap_tree.childNodes: if cn.nodeName != 'UAPItem': continue uapi_value = cn.firstChild.nodeValue if uapi_value.isdigit(): uapi_value = int(uapi_value) if uapi_value in sorted_mdi_keys: fspec_bits.append(int(cn.getAttribute('bit'))) l, v = mdi[uapi_value] data_record <<= l * 8 data_record += v n_octets_data_record += l if fspec_bits == []: print 'no dataitems identified' return 0, 0 # FSPEC for data record max_bit = max(fspec_bits) n_octets_fspec = max_bit / 8 + 1 # Fn fspec = 0 for i in fspec_bits: fspec += (1 << (n_octets_fspec * 8 - 1 - i)) # FX for i in range(n_octets_fspec - 1): fspec += (1 << ((n_octets_fspec - 1 - i) * 8)) data_record += (fspec << (n_octets_data_record * 8)) n_octets_data_record += n_octets_fspec return n_octets_data_record, data_record def encode_dataitem(dfd, tree): """Returns the encoded Data Item. Encodes the Data Item in the data field of record according to the rules defined in the XML file. Args: dfd (dict): The dictionary with Data Item values. tree (Document object): The specification for ASTERIX category. Returns: (length, value) (tuples): The Data Field size and Data Field. """ assert type(dfd) is dict or type(dfd) is list for c in tree.getElementsByTagName('DataItemFormat'): for d in c.childNodes: if d.nodeName == 'Fixed': return encode_fixed(dfd, d) else: if d.nodeName == 'Variable': return encode_variable(dfd, d) else: if d.nodeName == 'Repetitive': return encode_repetitive(dfd, d) else: if d.nodeName == 'Compound': return encode_compound(dfd, d) def encode_fixed(bd, tree): """ Returns the encoded Data Item as a fixed length Data Field. Args: dfd (dict): The dictionary with Data Item values. tree (Document object): The rules to encode Data Item. Returns: (length, value) (tuples): The Data Field size and Data Field. 
""" length = int(tree.getAttribute('length')) value = 0 has_encoded = False for cn in tree.childNodes: if cn.nodeName != 'Bits': continue key = cn.getElementsByTagName('BitsShortName')[0].firstChild.nodeValue bits_unit = cn.getElementsByTagName('BitsUnit') if key in bd and key != 'FX': has_encoded = True assert (cn.getAttribute('bit') == '' and (cn.getAttribute('from') != '' and cn.getAttribute('to') !=
print 'encoding cat', k
conditional_block
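The record-header arithmetic in encode() prepends a one-octet category number and a two-octet LEN field, where LEN counts the whole data block including the three header octets. Assuming the assembled integer is ultimately written most-significant octet first (the usual ASTERIX wire order), the equivalent byte-level construction looks like this; the function name and sample payload are illustrative:

```rust
// Sketch of the CAT + LEN data-block header built by encode().
fn data_block(cat: u8, payload: &[u8]) -> Vec<u8> {
    let len = (1 + 2 + payload.len()) as u16; // header octets are counted too
    let mut block = Vec::with_capacity(len as usize);
    block.push(cat);
    block.extend_from_slice(&len.to_be_bytes());
    block.extend_from_slice(payload);
    block
}

fn main() {
    // A CAT 034 block carrying a 5-octet record: LEN = 1 + 2 + 5 = 8.
    let block = data_block(34, &[0u8; 5]);
    assert_eq!(&block[..3], &[0x22, 0x00, 0x08]);
    assert_eq!(block.len(), 8);
}
```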
asterix_utils.py
', 10: 'asterix_cat010_1_1.xml', 19: 'asterix_cat019_1_2.xml', 20: 'asterix_cat020_1_7.xml', #21: 'asterix_cat021_0_23.xml', 21: 'asterix_cat021_0_26.xml', #21: 'asterix_cat021_1_8.xml', 23: 'asterix_cat023_0_13.xml', 30: 'asterix_cat030_6_2.xml', 31: 'asterix_cat031_6_2.xml', #32: 'asterix_cat032_6_2.xml', 32: 'asterix_cat032_7_0.xml', 34: 'asterix_cat034_1_26.xml', 48: 'asterix_cat048_1_14.xml', #62: 'asterix_cat062_0_17.xml', #62: 'asterix_cat062_1_9.xml', 62: 'asterix_cat062_1_16.xml', #62: 'asterix_cat062_1_7.xml', 63: 'asterix_cat063_1_3.xml', 65: 'asterix_cat065_1_3.xml', #65:'asterix_cat065_1_2.xml', 242: 'asterix_cat242_1_0.xml', #252: 'asterix_cat252_6_2.xml', 252: 'asterix_cat252_7_0.xml'} # , #252: 'asterix_cat252_6_1.xml'} def load_asterix_category_format(k): """ Return a Document object representing the content of the document from the given input. Args: k (int): The ASTERIX category. Returns: xml.dom.minidom: The Document Object Model interface. """ global filenames try: __basePath__ = os.path.abspath(os.path.join(os.getcwd(), '../../../..')) # Look for file in current executing directory path_filename1 = filenames[k] # On default directory (absolute) path_filename2 = __basePath__ + "/" +filenames[k] # On default directory (relative) path_filename3 = os.path.dirname(os.path.realpath(__file__)) + "/xml/" + filenames[k] if os.path.isfile(path_filename1): # print "Loading file '%s'" % path_filename1 return minidom.parse(path_filename1) if os.path.isfile(path_filename2): # print "Loading file '%s'" % path_filename2 return minidom.parse(path_filename2) if os.path.isfile(path_filename3): # print "Loading file '%s'" % path_filename3 return minidom.parse(path_filename3) return None except: traceback.print_exc() return None def encode(asterix): """ Encodes a dictionary (asterix) in the EUROCONTROL ASTERIX category. Args: asterix (dict): A dictionary with data block of ASTERIX category. Returns: asterix_record (buffer): Data block buffer. """ assert type(asterix) is dict asterix_record = 0 #priority_asterix_cat = [21, 34] for k, v in asterix.iteritems(): #for k in priority_asterix_cat: v = asterix[k] record = 0 n_octets_data_record = 0 cat = 0 ctf = load_asterix_category_format(k) if ctf is None: continue if verbose >= 1: print 'encoding cat', k cat = k for cat_tree in ctf.getElementsByTagName('Category'): if k != int(cat_tree.getAttribute('id')): continue for data_record in v: ll_db, db = encode_category(k, data_record, cat_tree) #TODO: use maximum datablock size record <<= ll_db * 8 record += db n_octets_data_record += ll_db if verbose >= 1: print "Tamanho do bloco de dados ", ll_db break # Record header ( CAT + LEN ) record += (cat << (n_octets_data_record * 8 + 16)) record += ((1 + 2 + n_octets_data_record) << ((n_octets_data_record) * 8)) asterix_record <<= (1 + 2 + n_octets_data_record) * 8 asterix_record += record return asterix_record def encode_category(cat, did, tree): """ Encodes the record from the given category (cat). Args: cat (int): The given category. did (dict): The dictionary with data to encode. tree (Document object): The specification for ASTERIX category. Returns: (n_octets_data_record, data_record) (tuples): The caetgory record size and record. 
""" if did == {}: return 0, 0 mdi = {} for c in tree.getElementsByTagName('DataItem'): di = c.getAttribute('id') if di.isdigit(): di = int(di) rule = c.getAttribute('rule') if di in did: if verbose >= 1: print 'encoding dataitem', di l, v = encode_dataitem(did[di], c) mdi[di] = l, v else: if rule == 'mandatory' and verbose >= 1: print 'absent mandatory dataitem', di data_record = 0L n_octets_data_record = 0 sorted_mdi_keys = sorted(mdi.keys()) fspec_bits = [] uap_tree = tree.getElementsByTagName('UAP')[0] for cn in uap_tree.childNodes: if cn.nodeName != 'UAPItem': continue uapi_value = cn.firstChild.nodeValue if uapi_value.isdigit(): uapi_value = int(uapi_value) if uapi_value in sorted_mdi_keys: fspec_bits.append(int(cn.getAttribute('bit'))) l, v = mdi[uapi_value] data_record <<= l * 8 data_record += v n_octets_data_record += l if fspec_bits == []: print 'no dataitems identified' return 0, 0 # FSPEC for data record max_bit = max(fspec_bits) n_octets_fspec = max_bit / 8 + 1 # Fn fspec = 0 for i in fspec_bits: fspec += (1 << (n_octets_fspec * 8 - 1 - i)) # FX for i in range(n_octets_fspec - 1): fspec += (1 << ((n_octets_fspec - 1 - i) * 8)) data_record += (fspec << (n_octets_data_record * 8)) n_octets_data_record += n_octets_fspec return n_octets_data_record, data_record def encode_dataitem(dfd, tree): """Returns the encoded Data Item. Encodes the Data Item in the data field of record according to the rules defined in the XML file. Args: dfd (dict): The dictionary with Data Item values. tree (Document object): The specification for ASTERIX category. Returns: (length, value) (tuples): The Data Field size and Data Field. """ assert type(dfd) is dict or type(dfd) is list for c in tree.getElementsByTagName('DataItemFormat'): for d in c.childNodes: if d.nodeName == 'Fixed': return encode_fixed(dfd, d) else: if d.nodeName == 'Variable': return encode_variable(dfd, d) else: if d.nodeName == 'Repetitive': return encode_repetitive(dfd, d) else: if d.nodeName == 'Compound': return encode_compound(dfd, d) def
(bd, tree): """ Returns the encoded Data Item as a fixed length Data Field. Args: bd (dict): The dictionary with Data Item values. tree (Document object): The rules to encode Data Item. Returns: (length, value) (tuples): The Data Field size and Data Field. """ length = int(tree.getAttribute('length')) value = 0 has_encoded = False for cn in tree.childNodes: if cn.nodeName != 'Bits': continue key = cn.getElementsByTagName('BitsShortName')[0].firstChild.nodeValue bits_unit = cn.getElementsByTagName('BitsUnit') if key in bd and key != 'FX': has_encoded = True assert (cn.getAttribute('bit') == '' and (cn.getAttribute('from') != '' and cn.getAttribute('to')
encode_fixed
identifier_name
asterix_utils.py
', 10: 'asterix_cat010_1_1.xml', 19: 'asterix_cat019_1_2.xml', 20: 'asterix_cat020_1_7.xml', #21: 'asterix_cat021_0_23.xml', 21: 'asterix_cat021_0_26.xml', #21: 'asterix_cat021_1_8.xml', 23: 'asterix_cat023_0_13.xml', 30: 'asterix_cat030_6_2.xml', 31: 'asterix_cat031_6_2.xml', #32: 'asterix_cat032_6_2.xml', 32: 'asterix_cat032_7_0.xml', 34: 'asterix_cat034_1_26.xml', 48: 'asterix_cat048_1_14.xml', #62: 'asterix_cat062_0_17.xml', #62: 'asterix_cat062_1_9.xml', 62: 'asterix_cat062_1_16.xml', #62: 'asterix_cat062_1_7.xml', 63: 'asterix_cat063_1_3.xml', 65: 'asterix_cat065_1_3.xml', #65:'asterix_cat065_1_2.xml', 242: 'asterix_cat242_1_0.xml', #252: 'asterix_cat252_6_2.xml', 252: 'asterix_cat252_7_0.xml'} # , #252: 'asterix_cat252_6_1.xml'} def load_asterix_category_format(k): """ Return a Document object representing the content of the document from the given input. Args: k (int): The ASTERIX category. Returns: xml.dom.minidom: The Document Object Model interface.
# Look for file in current executing directory path_filename1 = filenames[k] # On default directory (absolute) path_filename2 = __basePath__ + "/" +filenames[k] # On default directory (relative) path_filename3 = os.path.dirname(os.path.realpath(__file__)) + "/xml/" + filenames[k] if os.path.isfile(path_filename1): # print "Loading file '%s'" % path_filename1 return minidom.parse(path_filename1) if os.path.isfile(path_filename2): # print "Loading file '%s'" % path_filename2 return minidom.parse(path_filename2) if os.path.isfile(path_filename3): # print "Loading file '%s'" % path_filename3 return minidom.parse(path_filename3) return None except: traceback.print_exc() return None def encode(asterix): """ Encodes a dictionary (asterix) in the EUROCONTROL ASTERIX category. Args: asterix (dict): A dictionary with data block of ASTERIX category. Returns: asterix_record (buffer): Data block buffer. """ assert type(asterix) is dict asterix_record = 0 #priority_asterix_cat = [21, 34] for k, v in asterix.iteritems(): #for k in priority_asterix_cat: v = asterix[k] record = 0 n_octets_data_record = 0 cat = 0 ctf = load_asterix_category_format(k) if ctf is None: continue if verbose >= 1: print 'encoding cat', k cat = k for cat_tree in ctf.getElementsByTagName('Category'): if k != int(cat_tree.getAttribute('id')): continue for data_record in v: ll_db, db = encode_category(k, data_record, cat_tree) #TODO: use maximum datablock size record <<= ll_db * 8 record += db n_octets_data_record += ll_db if verbose >= 1: print "Tamanho do bloco de dados ", ll_db break # Record header ( CAT + LEN ) record += (cat << (n_octets_data_record * 8 + 16)) record += ((1 + 2 + n_octets_data_record) << ((n_octets_data_record) * 8)) asterix_record <<= (1 + 2 + n_octets_data_record) * 8 asterix_record += record return asterix_record def encode_category(cat, did, tree): """ Encodes the record from the given category (cat). Args: cat (int): The given category. did (dict): The dictionary with data to encode. tree (Document object): The specification for ASTERIX category. Returns: (n_octets_data_record, data_record) (tuples): The caetgory record size and record. """ if did == {}: return 0, 0 mdi = {} for c in tree.getElementsByTagName('DataItem'): di = c.getAttribute('id') if di.isdigit(): di = int(di) rule = c.getAttribute('rule') if di in did: if verbose >= 1: print 'encoding dataitem', di l, v = encode_dataitem(did[di], c) mdi[di] = l, v else: if rule == 'mandatory' and verbose >= 1: print 'absent mandatory dataitem', di data_record = 0L n_octets_data_record = 0 sorted_mdi_keys = sorted(mdi.keys()) fspec_bits = [] uap_tree = tree.getElementsByTagName('UAP')[0] for cn in uap_tree.childNodes: if cn.nodeName != 'UAPItem': continue uapi_value = cn.firstChild.nodeValue if uapi_value.isdigit(): uapi_value = int(uapi_value) if uapi_value in sorted_mdi_keys: fspec_bits.append(int(cn.getAttribute('bit'))) l, v = mdi[uapi_value] data_record <<= l * 8 data_record += v n_octets_data_record += l if fspec_bits == []: print 'no dataitems identified' return 0, 0 # FSPEC for data record max_bit = max(fspec_bits) n_octets_fspec = max_bit / 8 + 1 # Fn fspec = 0 for i in fspec_bits: fspec += (1 << (n_octets_fspec * 8 - 1 - i)) # FX for i in range(n_octets_fspec - 1): fspec += (1 << ((n_octets_fspec - 1 - i) * 8)) data_record += (fspec << (n_octets_data_record * 8)) n_octets_data_record += n_octets_fspec return n_octets_data_record, data_record def encode_dataitem(dfd, tree): """Returns the encoded Data Item. 
Encodes the Data Item in the data field of record according to the rules defined in the XML file. Args: dfd (dict): The dictionary with Data Item values. tree (Document object): The specification for ASTERIX category. Returns: (length, value) (tuples): The Data Field size and Data Field. """ assert type(dfd) is dict or type(dfd) is list for c in tree.getElementsByTagName('DataItemFormat'): for d in c.childNodes: if d.nodeName == 'Fixed': return encode_fixed(dfd, d) else: if d.nodeName == 'Variable': return encode_variable(dfd, d) else: if d.nodeName == 'Repetitive': return encode_repetitive(dfd, d) else: if d.nodeName == 'Compound': return encode_compound(dfd, d) def encode_fixed(bd, tree): """ Returns the encoded Data Item as a fixed length Data Field. Args: dfd (dict): The dictionary with Data Item values. tree (Document object): The rules to encode Data Item. Returns: (length, value) (tuples): The Data Field size and Data Field. """ length = int(tree.getAttribute('length')) value = 0 has_encoded = False for cn in tree.childNodes: if cn.nodeName != 'Bits': continue key = cn.getElementsByTagName('BitsShortName')[0].firstChild.nodeValue bits_unit = cn.getElementsByTagName('BitsUnit') if key in bd and key != 'FX': has_encoded = True assert (cn.getAttribute('bit') == '' and (cn.getAttribute('from') != '' and cn.getAttribute('to') !=
""" global filenames try: __basePath__ = os.path.abspath(os.path.join(os.getcwd(), '../../../..'))
random_line_split
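encode_fixed (its full body appears in the first asterix_utils.py record above) maps every <Bits> element onto a slice of the fixed-length field: the physical value is divided by the LSB scale from <BitsUnit>, truncated, masked to the width given by the 1-based from/to positions, and shifted so that "to" becomes the least significant bit. A minimal sketch of that packing; the function name and the sample bit range and scale are illustrative, not taken from any category XML file:

```rust
// Sketch of the per-<Bits> packing performed by encode_fixed().
fn pack_bits(field: &mut u64, value: f64, scale: f64, from: u32, to: u32) {
    let width = from - to + 1; // "from" is the MSB position, "to" the LSB position
    let mask = (1u64 << width) - 1;
    let raw = (value / scale) as u64; // truncating division, like int(v / scale)
    *field |= (raw & mask) << (to - 1);
}

fn main() {
    let mut field = 0u64;
    // A 16-bit quantity on bits 24..9 with an LSB of 0.25: 100.0 becomes 400 raw.
    pack_bits(&mut field, 100.0, 0.25, 24, 9);
    assert_eq!(field >> 8, 400);
}
```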
lib.rs
Matrix<T> for ndarray::Array2<T> { #[inline] fn nrows(&self) -> usize { self.nrows() } #[inline] fn ncols(&self) -> usize { self.ncols() } #[inline] fn index(&self, row: usize, column: usize) -> T { self[[row, column]] } } /// Compute row minima in O(*m* + *n*) time. /// /// This implements the SMAWK algorithm for finding row minima in a /// totally monotone matrix. /// /// The SMAWK algorithm is from Agarwal, Klawe, Moran, Shor, and /// Wilbur, *Geometric applications of a matrix searching algorithm*, /// Algorithmica 2, pp. 195-208 (1987) and the code here is a /// translation [David Eppstein's Python code][pads]. /// /// [pads]: https://github.com/jfinkels/PADS/blob/master/pads/smawk.py /// /// Running time on an *m* ✕ *n* matrix: O(*m* + *n*). /// /// # Panics /// /// It is an error to call this on a matrix with zero columns. pub fn smawk_row_minima<T: PartialOrd + Copy, M: Matrix<T>>(matrix: &M) -> Vec<usize> { // Benchmarking shows that SMAWK performs roughly the same on row- // and column-major matrices. let mut minima = vec![0; matrix.nrows()]; smawk_inner( &|j, i| matrix.index(i, j), &(0..matrix.ncols()).collect::<Vec<_>>(), &(0..matrix.nrows()).collect::<Vec<_>>(), &mut minima, ); minima } /// Compute column minima in O(*m* + *n*) time. /// /// This implements the SMAWK algorithm for finding column minima in a /// totally monotone matrix. /// /// The SMAWK algorithm is from Agarwal, Klawe, Moran, Shor, and /// Wilbur, *Geometric applications of a matrix searching algorithm*, /// Algorithmica 2, pp. 195-208 (1987) and the code here is a /// translation [David Eppstein's Python code][pads]. /// /// [pads]: https://github.com/jfinkels/PADS/blob/master/pads/smawk.py /// /// Running time on an *m* ✕ *n* matrix: O(*m* + *n*). /// /// # Panics /// /// It is an error to call this on a matrix with zero rows. pub fn smawk_column_minima<T: PartialOrd + Copy, M: Matrix<T>>(matrix: &M) -> Vec<usize> { let mut minima = vec![0; matrix.ncols()]; smawk_inner( &|i, j| matrix.index(i, j), &(0..matrix.nrows()).collect::<Vec<_>>(), &(0..matrix.ncols()).collect::<Vec<_>>(), &mut minima, ); minima } /// Compute column minima in the given area of the matrix. The /// `minima` slice is updated inplace. fn smawk_inner<T: PartialOrd + Copy, M: Fn(usize, usize) -> T>( matrix: &M, rows: &[usize], cols: &[usize], mut minima: &mut [usize], ) { if cols.is_empty() { return; } let mut stack = Vec::with_capacity(cols.len()); for r in rows { // TODO: use stack.last() instead of stack.is_empty() etc while !stack.is_empty() && matrix(stack[stack.len() - 1], cols[stack.len() - 1]) > matrix(*r, cols[stack.len() - 1]) { stack.pop(); } if stack.len() != cols.len() { stack.push(*r); } } let rows = &stack; let mut odd_cols = Vec::with_capacity(1 + cols.len() / 2); for (idx, c) in cols.iter().enumerate() { if idx % 2 == 1 { odd_cols.push(*c); } } smawk_inner(matrix, rows, &odd_cols, &mut minima); let mut r = 0; for (c, &col) in cols.iter().enumerate().filter(|(c, _)| c % 2 == 0) { let mut row = rows[r]; let last_row = if c == cols.len() - 1 { rows[rows.len() - 1] } else { minima[cols[c + 1]] }; let mut pair = (matrix(row, col), row); while row != last_row { r += 1; row = rows[r]; if (matrix(row, col), row) < pair { pair = (matrix(row, col), row); } } minima[col] = pair.1; } } /// Compute upper-right column minima in O(*m* + *n*) time. /// /// The input matrix must be totally monotone. /// /// The function returns a vector of `(usize, T)`. 
The `usize` in the /// tuple at index `j` tells you the row of the minimum value in /// column `j` and the `T` value is minimum value itself. /// /// The algorithm only considers values above the main diagonal, which /// means that it computes values `v(j)` where: /// /// ```text /// v(0) = initial /// v(j) = min { M[i, j] | i < j } for j > 0 /// ``` /// /// If we let `r(j)` denote the row index of the minimum value in /// column `j`, the tuples in the result vector become `(r(j), M[r(j), /// j])`. /// /// The algorithm is an *online* algorithm, in the sense that `matrix` /// function can refer back to previously computed column minima when /// determining an entry in the matrix. The guarantee is that we only /// call `matrix(i, j)` after having computed `v(i)`. This is /// reflected in the `&[(usize, T)]` argument to `matrix`, which grows /// as more and more values are computed. pub fn online_column_minima<T: Copy + PartialOrd, M: Fn(&[(usize, T)], usize, usize) -> T>( initial: T, size: usize, matrix: M, ) -> Vec<(usize, T)> { let mut result = vec![(0, initial)]; // State used by the algorithm. let mut finished = 0; let mut base = 0; let mut tentative = 0; // Shorthand for evaluating the matrix. We need a macro here since // we don't want to borrow the result vector. macro_rules! m { ($i:expr, $j:expr) => {{ assert!($i < $j, "(i, j) not above diagonal: ({}, {})", $i, $j); assert!( $i < size && $j < size, "(i, j) out of bounds: ({}, {}), size: {}", $i, $j, size ); matrix(&result[..finished + 1], $i, $j) }}; } // Keep going until we have finished all size columns. Since the // columns are zero-indexed, we're done when finished == size - 1. while finished < size - 1 { // First case: we have already advanced past the previous // tentative value. We make a new tentative value by applying // smawk_inner to the largest square submatrix that fits under // the base. let i = finished + 1; if i > tentative { let rows = (base..finished + 1).collect::<Vec<_>>(); tentative = std::cmp::min(finished + rows.len(), size - 1); let cols = (finished + 1..tentative + 1).collect::<Vec<_>>(); let mut minima = vec![0; tentative + 1]; smawk_inner(&|i, j| m![i, j], &rows, &cols, &mut minima); for col in cols { let row = minima[col]; let v = m![row, col]; if col >= result.len() { result.push((row, v)); } else if v < result[col].1 { result[col] = (row, v); } } finished = i; continue; } // Second case: the new column minimum is on the diagonal. All // subsequent ones will be at least as low, so we can clear // out all our work from higher rows. As in the fourth case, // the loss of tentative is amortized against the increase in // base. let diag = m![i - 1, i]; if diag < result[i].1 { re
sult[i] = (i - 1, diag); base = i - 1; tentative = i; finished = i; continue; } // Thi
conditional_block
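As a concrete usage sketch of the two entry points documented above, here is a small totally monotone matrix fed through both. The sample values are mine: entries of the form (i - j)² satisfy the Monge inequality because squares are convex, so the matrix is totally monotone; the example also assumes the Vec<Vec<T>> implementation of the Matrix trait that the smawk crate's own doctests and unit tests use:

```rust
use smawk::{smawk_column_minima, smawk_row_minima};

fn main() {
    // 3x4 Monge matrix with entries (i - j)^2:
    //   0 1 4 9
    //   1 0 1 4
    //   4 1 0 1
    let matrix: Vec<Vec<i64>> = (0i64..3)
        .map(|i| (0i64..4).map(|j| (i - j) * (i - j)).collect())
        .collect();
    assert_eq!(smawk_row_minima(&matrix), vec![0, 1, 2]); // leftmost minimum per row
    assert_eq!(smawk_column_minima(&matrix), vec![0, 1, 2, 2]); // topmost minimum per column
}
```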
lib.rs
/// /// It is an error to call this on a matrix with zero columns. pub fn smawk_row_minima<T: PartialOrd + Copy, M: Matrix<T>>(matrix: &M) -> Vec<usize> { // Benchmarking shows that SMAWK performs roughly the same on row- // and column-major matrices. let mut minima = vec![0; matrix.nrows()]; smawk_inner( &|j, i| matrix.index(i, j), &(0..matrix.ncols()).collect::<Vec<_>>(), &(0..matrix.nrows()).collect::<Vec<_>>(), &mut minima, ); minima } /// Compute column minima in O(*m* + *n*) time. /// /// This implements the SMAWK algorithm for finding column minima in a /// totally monotone matrix. /// /// The SMAWK algorithm is from Agarwal, Klawe, Moran, Shor, and /// Wilbur, *Geometric applications of a matrix searching algorithm*, /// Algorithmica 2, pp. 195-208 (1987) and the code here is a /// translation [David Eppstein's Python code][pads]. /// /// [pads]: https://github.com/jfinkels/PADS/blob/master/pads/smawk.py /// /// Running time on an *m* ✕ *n* matrix: O(*m* + *n*). /// /// # Panics /// /// It is an error to call this on a matrix with zero rows. pub fn smawk_column_minima<T: PartialOrd + Copy, M: Matrix<T>>(matrix: &M) -> Vec<usize> { let mut minima = vec![0; matrix.ncols()]; smawk_inner( &|i, j| matrix.index(i, j), &(0..matrix.nrows()).collect::<Vec<_>>(), &(0..matrix.ncols()).collect::<Vec<_>>(), &mut minima, ); minima } /// Compute column minima in the given area of the matrix. The /// `minima` slice is updated inplace. fn smawk_inner<T: PartialOrd + Copy, M: Fn(usize, usize) -> T>( matrix: &M, rows: &[usize], cols: &[usize], mut minima: &mut [usize], ) { if cols.is_empty() { return; } let mut stack = Vec::with_capacity(cols.len()); for r in rows { // TODO: use stack.last() instead of stack.is_empty() etc while !stack.is_empty() && matrix(stack[stack.len() - 1], cols[stack.len() - 1]) > matrix(*r, cols[stack.len() - 1]) { stack.pop(); } if stack.len() != cols.len() { stack.push(*r); } } let rows = &stack; let mut odd_cols = Vec::with_capacity(1 + cols.len() / 2); for (idx, c) in cols.iter().enumerate() { if idx % 2 == 1 { odd_cols.push(*c); } } smawk_inner(matrix, rows, &odd_cols, &mut minima); let mut r = 0; for (c, &col) in cols.iter().enumerate().filter(|(c, _)| c % 2 == 0) { let mut row = rows[r]; let last_row = if c == cols.len() - 1 { rows[rows.len() - 1] } else { minima[cols[c + 1]] }; let mut pair = (matrix(row, col), row); while row != last_row { r += 1; row = rows[r]; if (matrix(row, col), row) < pair { pair = (matrix(row, col), row); } } minima[col] = pair.1; } } /// Compute upper-right column minima in O(*m* + *n*) time. /// /// The input matrix must be totally monotone. /// /// The function returns a vector of `(usize, T)`. The `usize` in the /// tuple at index `j` tells you the row of the minimum value in /// column `j` and the `T` value is minimum value itself. /// /// The algorithm only considers values above the main diagonal, which /// means that it computes values `v(j)` where: /// /// ```text /// v(0) = initial /// v(j) = min { M[i, j] | i < j } for j > 0 /// ``` /// /// If we let `r(j)` denote the row index of the minimum value in /// column `j`, the tuples in the result vector become `(r(j), M[r(j), /// j])`. /// /// The algorithm is an *online* algorithm, in the sense that `matrix` /// function can refer back to previously computed column minima when /// determining an entry in the matrix. The guarantee is that we only /// call `matrix(i, j)` after having computed `v(i)`. 
This is /// reflected in the `&[(usize, T)]` argument to `matrix`, which grows /// as more and more values are computed. pub fn online_column_minima<T: Copy + PartialOrd, M: Fn(&[(usize, T)], usize, usize) -> T>( initial: T, size: usize, matrix: M, ) -> Vec<(usize, T)> { let mut result = vec![(0, initial)]; // State used by the algorithm. let mut finished = 0; let mut base = 0; let mut tentative = 0; // Shorthand for evaluating the matrix. We need a macro here since // we don't want to borrow the result vector. macro_rules! m { ($i:expr, $j:expr) => {{ assert!($i < $j, "(i, j) not above diagonal: ({}, {})", $i, $j); assert!( $i < size && $j < size, "(i, j) out of bounds: ({}, {}), size: {}", $i, $j, size ); matrix(&result[..finished + 1], $i, $j) }}; } // Keep going until we have finished all size columns. Since the // columns are zero-indexed, we're done when finished == size - 1. while finished < size - 1 { // First case: we have already advanced past the previous // tentative value. We make a new tentative value by applying // smawk_inner to the largest square submatrix that fits under // the base. let i = finished + 1; if i > tentative { let rows = (base..finished + 1).collect::<Vec<_>>(); tentative = std::cmp::min(finished + rows.len(), size - 1); let cols = (finished + 1..tentative + 1).collect::<Vec<_>>(); let mut minima = vec![0; tentative + 1]; smawk_inner(&|i, j| m![i, j], &rows, &cols, &mut minima); for col in cols { let row = minima[col]; let v = m![row, col]; if col >= result.len() { result.push((row, v)); } else if v < result[col].1 { result[col] = (row, v); } } finished = i; continue; } // Second case: the new column minimum is on the diagonal. All // subsequent ones will be at least as low, so we can clear // out all our work from higher rows. As in the fourth case, // the loss of tentative is amortized against the increase in // base. let diag = m![i - 1, i]; if diag < result[i].1 { result[i] = (i - 1, diag); base = i - 1; tentative = i; finished = i; continue; } // Third case: row i-1 does not supply a column minimum in any // column up to tentative. We simply advance finished while // maintaining the invariant. if m![i - 1, tentative] >= result[tentative].1 { finished = i; continue; } // Fourth and final case: a new column minimum at tentative. // This allows us to make progress by incorporating rows prior // to finished into the base. The base invariant holds because // these rows cannot supply any later column minima. The work // done when we last advanced tentative (and undone by this // step) can be amortized against the increase in base. base = i - 1; tentative = i; finished = i; } result } #[cfg(test)] mod tests { use super::*; #[test] fn smawk_1x1() { let matrix = vec![vec![2]]; assert_eq!(smawk_row_minima(&matrix), vec![0]); assert_eq!(smawk_column_minima(&matrix), vec![0]); } #[test] fn smawk_2x1() {
let
identifier_name
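The lib.rs record above documents `online_column_minima`, whose result is defined by v(0) = initial and v(j) = min { M[i, j] | i < j } for j > 0. The crate computes this in O(*m* + *n*); purely as a cross-check of that definition (not the crate's algorithm), here is a standalone Go sketch that evaluates the same quantities by brute force in O(n²). The matrix and all names are assumptions made up for the example.

```go
package main

import "fmt"

// bruteForceOnlineColumnMinima mirrors the definition in the Rust docs above:
// v(0) = initial, and v(j) = min{ M[i][j] | i < j } for j > 0, returning a
// (row, value) pair per column. O(n^2), for illustration only.
func bruteForceOnlineColumnMinima(m [][]int, initial int) [][2]int {
	n := len(m)
	result := make([][2]int, n)
	result[0] = [2]int{0, initial}
	for j := 1; j < n; j++ {
		bestRow, bestVal := 0, m[0][j]
		for i := 1; i < j; i++ {
			if m[i][j] < bestVal {
				bestRow, bestVal = i, m[i][j]
			}
		}
		result[j] = [2]int{bestRow, bestVal}
	}
	return result
}

func main() {
	// Small, hand-made matrix (an assumption for the example; only entries
	// strictly above the main diagonal are ever inspected).
	m := [][]int{
		{0, 4, 4, 4, 5},
		{0, 0, 3, 3, 4},
		{0, 0, 0, 2, 3},
		{0, 0, 0, 0, 1},
		{0, 0, 0, 0, 0},
	}
	fmt.Println(bruteForceOnlineColumnMinima(m, 0)) // [[0 0] [0 4] [1 3] [2 2] [3 1]]
}
```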
lib.rs
//! vec![2, 1, 3, 3, 4], //! vec![2, 1, 3, 3, 4], //! vec![3, 2, 4, 3, 4], //! vec![4, 3, 2, 1, 1], //! ]; //! let minima = vec![1, 1, 4, 4, 4]; //! assert_eq!(smawk_column_minima(&matrix), minima); //! ``` //! //! The `minima` vector gives the index of the minimum value per //! column, so `minima[0] == 1` since the minimum value in the first //! column is 2 (row 1). Note that the smallest row index is returned. //! //! # Definitions //! //! Some of the functions in this crate only work on matrices that are //! *totally monotone*, which we will define below. //! //! ## Monotone Matrices //! //! We start with a helper definition. Given an *m* ✕ *n* matrix `M`, //! we say that `M` is *monotone* when the minimum value of row `i` is //! found to the left of the minimum value in row `i'` where `i < i'`. //! //! More formally, if we let `rm(i)` denote the column index of the //! left-most minimum value in row `i`, then we have //! //! ```text //! rm(0) ≤ rm(1) ≤ ... ≤ rm(m - 1) //! ``` //! //! This means that as you go down the rows from top to bottom, the //! row-minima proceed from left to right. //! //! The algorithms in this crate deal with finding such row- and //! column-minima. //! //! ## Totally Monotone Matrices //! //! We say that a matrix `M` is *totally monotone* when every //! sub-matrix is monotone. A sub-matrix is formed by the intersection //! of any two rows `i < i'` and any two columns `j < j'`. //! //! This is often expressed as via this equivalent condition: //! //! ```text //! M[i, j] > M[i, j'] => M[i', j] > M[i', j'] //! ``` //! //! for all `i < i'` and `j < j'`. //! //! ## Monge Property for Matrices //! //! A matrix `M` is said to fulfill the *Monge property* if //! //! ```text //! M[i, j] + M[i', j'] ≤ M[i, j'] + M[i', j] //! ``` //! //! for all `i < i'` and `j < j'`. This says that given any rectangle //! in the matrix, the sum of the top-left and bottom-right corners is //! less than or equal to the sum of the bottom-left and upper-right //! corners. //! //! All Monge matrices are totally monotone, so it is enough to //! establish that the Monge property holds in order to use a matrix //! with the functions in this crate. If your program is dealing with //! unknown inputs, it can use [`monge::is_monge`] to verify that a //! matrix is a Monge matrix. #![doc(html_root_url = "https://docs.rs/smawk/0.3.1")] #[cfg(feature = "ndarray")] pub mod brute_force; pub mod monge; #[cfg(feature = "ndarray")] pub mod recursive; /// Minimal matrix trait for two-dimensional arrays. /// /// This provides the functionality needed to represent a read-only /// numeric matrix. You can query the size of the matrix and access /// elements. Modeled after [`ndarray::Array2`] from the [ndarray /// crate](https://crates.io/crates/ndarray). /// /// Enable the `ndarray` Cargo feature if you want to use it with /// `ndarray::Array2`. pub trait Matrix<T: Copy> { /// Return the number of rows. fn nrows(&self) -> usize; /// Return the number of columns. fn ncols(&self) -> usize; /// Return a matrix element. fn index(&self, row: usize, column: usize) -> T; } /// Simple and inefficient matrix representation used for doctest /// examples and simple unit tests. /// /// You should prefer implementing it yourself, or you can enable the /// `ndarray` Cargo feature and use the provided implementation for /// [`ndarray::Array2`]. 
impl<T: Copy> Matrix<T> for Vec<Vec<T>> { fn nrows(&self) -> usize { self.len() } fn ncols(&self) -> usize { self[0].len() } fn index(&self, row: usize, column: usize) -> T { self[row][column] } } /// Adapting [`ndarray::Array2`] to the `Matrix` trait. /// /// **Note: this implementation is only available if you enable the /// `ndarray` Cargo feature.** #[cfg(feature = "ndarray")] impl<T: Copy> Matrix<T> for ndarray::Array2<T> { #[inline] fn nrows(&self) -> usize { self.nrows() } #[inline] fn ncols(&self) -> usize { self.ncols() } #[inline] fn index(&self, row: usize, column: usize) -> T { self[[row, column]] } } /// Compute row minima in O(*m* + *n*) time. /// /// This implements the SMAWK algorithm for finding row minima in a /// totally monotone matrix. /// /// The SMAWK algorithm is from Agarwal, Klawe, Moran, Shor, and /// Wilbur, *Geometric applications of a matrix searching algorithm*, /// Algorithmica 2, pp. 195-208 (1987) and the code here is a /// translation [David Eppstein's Python code][pads]. /// /// [pads]: https://github.com/jfinkels/PADS/blob/master/pads/smawk.py /// /// Running time on an *m* ✕ *n* matrix: O(*m* + *n*). /// /// # Panics /// /// It is an error to call this on a matrix with zero columns. pub fn smawk_row_minima<T: PartialOrd + Copy, M: Matrix<T>>(matrix: &M) -> Vec<usize> { // Benchmarking shows that SMAWK performs roughly the same on row- // and column-major matrices. let mut minima = vec![0; matrix.nrows()]; smawk_inner( &|j, i| matrix.index(i, j), &(0..matrix.ncols()).collect::<Vec<_>>(), &(0..matrix.nrows()).collect::<Vec<_>>(), &mut minima, ); minima } /// Compute column minima in O(*m* + *n*) time. /// /// This implements the SMAWK algorithm for finding column minima in a /// totally monotone matrix. /// /// The SMAWK algorithm is from Agarwal, Klawe, Moran, Shor, and /// Wilbur, *Geometric applications of a matrix searching algorithm*, /// Algorithmica 2, pp. 195-208 (1987) and the code here is a /// translation [David Eppstein's Python code][pads]. /// /// [pads]: https://github.com/jfinkels/PADS/blob/master/pads/smawk.py /// /// Running time on an *m* ✕ *n* matrix: O(*m* + *n*). /// /// # Panics /// /// It is an error to call this on a matrix with zero rows. pub fn smawk_column_minima<T: PartialOrd + Copy, M: Matrix<T>>(matrix: &M) -> Vec<usize> { let mut minima = vec![0; matrix.ncols()]; smawk_inner( &|i, j| matrix.index(i, j), &(0..matrix.nrows()).collect::<Vec<_>>(), &(0..matrix.ncols()).collect::<Vec<_>>(), &mut minima, ); minima } /// Compute column minima in the given area of the matrix. The /// `minima` slice is updated inplace. fn smawk_inner<T: PartialOrd + Copy, M: Fn(usize, usize) -> T>( matrix: &M, rows: &[usize], cols: &[usize], mut minima: &mut [usize], ) { if cols.is_empty() { return; } let mut stack = Vec::with_capacity(cols.len()); for r in rows { // TODO: use stack.last() instead of stack.is_empty() etc while !stack.is_empty() && matrix(stack[stack.len() - 1], cols[stack.len() - 1]) > matrix(*r, cols[stack.len() - 1]) { stack.pop(); } if stack.len() != cols.len() { stack.push(*r); } } let rows = &stack; let mut odd_cols
//! ``` //! use smawk::{Matrix, smawk_column_minima}; //! //! let matrix = vec![ //! vec![3, 2, 4, 5, 6],
random_line_split
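The module docs in the record above define the Monge property, M[i, j] + M[i', j'] ≤ M[i, j'] + M[i', j] for i < i' and j < j', and note that `monge::is_monge` can verify it for unknown inputs. As an illustration of the inequality only (written independently of the crate's implementation), here is a Go sketch that checks adjacent row/column pairs; that is sufficient, since the general case follows by summing the adjacent inequalities. The test matrix is the doctest matrix quoted in the same record.

```go
package main

import "fmt"

// isMongeBrute reports whether m satisfies the Monge condition
// m[i][j] + m[i+1][j+1] <= m[i][j+1] + m[i+1][j] for all adjacent i, j.
// Checking adjacent rows and columns is enough: the general i < i', j < j'
// inequality follows by summing the adjacent ones.
func isMongeBrute(m [][]int) bool {
	for i := 0; i+1 < len(m); i++ {
		for j := 0; j+1 < len(m[i]); j++ {
			if m[i][j]+m[i+1][j+1] > m[i][j+1]+m[i+1][j] {
				return false
			}
		}
	}
	return true
}

func main() {
	// The doctest matrix from the record above; the program prints whether
	// it also satisfies the (stronger) Monge property.
	matrix := [][]int{
		{3, 2, 4, 5, 6},
		{2, 1, 3, 3, 4},
		{2, 1, 3, 3, 4},
		{3, 2, 4, 3, 4},
		{4, 3, 2, 1, 1},
	}
	fmt.Println(isMongeBrute(matrix))
}
```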
lib.rs
inputs, it can use [`monge::is_monge`] to verify that a //! matrix is a Monge matrix. #![doc(html_root_url = "https://docs.rs/smawk/0.3.1")] #[cfg(feature = "ndarray")] pub mod brute_force; pub mod monge; #[cfg(feature = "ndarray")] pub mod recursive; /// Minimal matrix trait for two-dimensional arrays. /// /// This provides the functionality needed to represent a read-only /// numeric matrix. You can query the size of the matrix and access /// elements. Modeled after [`ndarray::Array2`] from the [ndarray /// crate](https://crates.io/crates/ndarray). /// /// Enable the `ndarray` Cargo feature if you want to use it with /// `ndarray::Array2`. pub trait Matrix<T: Copy> { /// Return the number of rows. fn nrows(&self) -> usize; /// Return the number of columns. fn ncols(&self) -> usize; /// Return a matrix element. fn index(&self, row: usize, column: usize) -> T; } /// Simple and inefficient matrix representation used for doctest /// examples and simple unit tests. /// /// You should prefer implementing it yourself, or you can enable the /// `ndarray` Cargo feature and use the provided implementation for /// [`ndarray::Array2`]. impl<T: Copy> Matrix<T> for Vec<Vec<T>> { fn nrows(&self) -> usize { self.len() } fn ncols(&self) -> usize { self[0].len() } fn index(&self, row: usize, column: usize) -> T { self[row][column] } } /// Adapting [`ndarray::Array2`] to the `Matrix` trait. /// /// **Note: this implementation is only available if you enable the /// `ndarray` Cargo feature.** #[cfg(feature = "ndarray")] impl<T: Copy> Matrix<T> for ndarray::Array2<T> { #[inline] fn nrows(&self) -> usize { self.nrows() } #[inline] fn ncols(&self) -> usize { self.ncols() } #[inline] fn index(&self, row: usize, column: usize) -> T { self[[row, column]] } } /// Compute row minima in O(*m* + *n*) time. /// /// This implements the SMAWK algorithm for finding row minima in a /// totally monotone matrix. /// /// The SMAWK algorithm is from Agarwal, Klawe, Moran, Shor, and /// Wilbur, *Geometric applications of a matrix searching algorithm*, /// Algorithmica 2, pp. 195-208 (1987) and the code here is a /// translation [David Eppstein's Python code][pads]. /// /// [pads]: https://github.com/jfinkels/PADS/blob/master/pads/smawk.py /// /// Running time on an *m* ✕ *n* matrix: O(*m* + *n*). /// /// # Panics /// /// It is an error to call this on a matrix with zero columns. pub fn smawk_row_minima<T: PartialOrd + Copy, M: Matrix<T>>(matrix: &M) -> Vec<usize> { // Benchmarking shows that SMAWK performs roughly the same on row- // and column-major matrices. let mut minima = vec![0; matrix.nrows()]; smawk_inner( &|j, i| matrix.index(i, j), &(0..matrix.ncols()).collect::<Vec<_>>(), &(0..matrix.nrows()).collect::<Vec<_>>(), &mut minima, ); minima } /// Compute column minima in O(*m* + *n*) time. /// /// This implements the SMAWK algorithm for finding column minima in a /// totally monotone matrix. /// /// The SMAWK algorithm is from Agarwal, Klawe, Moran, Shor, and /// Wilbur, *Geometric applications of a matrix searching algorithm*, /// Algorithmica 2, pp. 195-208 (1987) and the code here is a /// translation [David Eppstein's Python code][pads]. /// /// [pads]: https://github.com/jfinkels/PADS/blob/master/pads/smawk.py /// /// Running time on an *m* ✕ *n* matrix: O(*m* + *n*). /// /// # Panics /// /// It is an error to call this on a matrix with zero rows. pub fn smawk_column_minima<T: PartialOrd + Copy, M: Matrix<T>>(matrix: &M) -> Vec<usize> { let mut mi
lumn minima in the given area of the matrix. The /// `minima` slice is updated inplace. fn smawk_inner<T: PartialOrd + Copy, M: Fn(usize, usize) -> T>( matrix: &M, rows: &[usize], cols: &[usize], mut minima: &mut [usize], ) { if cols.is_empty() { return; } let mut stack = Vec::with_capacity(cols.len()); for r in rows { // TODO: use stack.last() instead of stack.is_empty() etc while !stack.is_empty() && matrix(stack[stack.len() - 1], cols[stack.len() - 1]) > matrix(*r, cols[stack.len() - 1]) { stack.pop(); } if stack.len() != cols.len() { stack.push(*r); } } let rows = &stack; let mut odd_cols = Vec::with_capacity(1 + cols.len() / 2); for (idx, c) in cols.iter().enumerate() { if idx % 2 == 1 { odd_cols.push(*c); } } smawk_inner(matrix, rows, &odd_cols, &mut minima); let mut r = 0; for (c, &col) in cols.iter().enumerate().filter(|(c, _)| c % 2 == 0) { let mut row = rows[r]; let last_row = if c == cols.len() - 1 { rows[rows.len() - 1] } else { minima[cols[c + 1]] }; let mut pair = (matrix(row, col), row); while row != last_row { r += 1; row = rows[r]; if (matrix(row, col), row) < pair { pair = (matrix(row, col), row); } } minima[col] = pair.1; } } /// Compute upper-right column minima in O(*m* + *n*) time. /// /// The input matrix must be totally monotone. /// /// The function returns a vector of `(usize, T)`. The `usize` in the /// tuple at index `j` tells you the row of the minimum value in /// column `j` and the `T` value is minimum value itself. /// /// The algorithm only considers values above the main diagonal, which /// means that it computes values `v(j)` where: /// /// ```text /// v(0) = initial /// v(j) = min { M[i, j] | i < j } for j > 0 /// ``` /// /// If we let `r(j)` denote the row index of the minimum value in /// column `j`, the tuples in the result vector become `(r(j), M[r(j), /// j])`. /// /// The algorithm is an *online* algorithm, in the sense that `matrix` /// function can refer back to previously computed column minima when /// determining an entry in the matrix. The guarantee is that we only /// call `matrix(i, j)` after having computed `v(i)`. This is /// reflected in the `&[(usize, T)]` argument to `matrix`, which grows /// as more and more values are computed. pub fn online_column_minima<T: Copy + PartialOrd, M: Fn(&[(usize, T)], usize, usize) -> T>( initial: T, size: usize, matrix: M, ) -> Vec<(usize, T)> { let mut result = vec![(0, initial)]; // State used by the algorithm. let mut finished = 0; let mut base = 0; let mut tentative = 0; // Shorthand for evaluating the matrix. We need a macro here since // we don't want to borrow the result vector. macro_rules! m { ($i:expr, $j:expr) => {{ assert!($i < $j, "(i, j) not above diagonal: ({}, {})", $i, $j); assert!( $i < size && $j < size, "(i, j) out of bounds: ({}, {}), size: {}", $i, $j, size );
nima = vec![0; matrix.ncols()]; smawk_inner( &|i, j| matrix.index(i, j), &(0..matrix.nrows()).collect::<Vec<_>>(), &(0..matrix.ncols()).collect::<Vec<_>>(), &mut minima, ); minima } /// Compute co
identifier_body
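This record repeats the row-minima documentation; the underlying definition is rm(i), the column of the leftmost minimum in row i, and a monotone matrix is one with rm(0) ≤ rm(1) ≤ … ≤ rm(m − 1). Below is a brute-force Go sketch of that definition only, not the SMAWK algorithm that the record's code implements in O(m + n); the matrix is the same illustrative doctest matrix.

```go
package main

import "fmt"

// leftmostRowMinima returns, for each row, the column index of its leftmost
// minimum — the rm(i) from the crate docs above. Brute force O(m*n), purely
// to illustrate the definition.
func leftmostRowMinima(m [][]int) []int {
	minima := make([]int, len(m))
	for i, row := range m {
		best := 0
		for j, v := range row {
			if v < row[best] {
				best = j // strict '<' keeps the leftmost index on ties
			}
		}
		minima[i] = best
	}
	return minima
}

func main() {
	// If the matrix is monotone, the printed indices are non-decreasing
	// from top to bottom.
	matrix := [][]int{
		{3, 2, 4, 5, 6},
		{2, 1, 3, 3, 4},
		{2, 1, 3, 3, 4},
		{3, 2, 4, 3, 4},
		{4, 3, 2, 1, 1},
	}
	fmt.Println(leftmostRowMinima(matrix)) // [1 1 1 1 3]
}
```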
key.go
created, expressed in RFC 3339 (https://tools.ietf.org/html/rfc3339) timestamp format. // Example: `2018-04-03T21:10:29.600Z` TimeCreated *common.SDKTime `mandatory:"true" json:"timeCreated"` // The OCID of the vault that contains this key. VaultId *string `mandatory:"true" json:"vaultId"` // Defined tags for this resource. Each key is predefined and scoped to a namespace. // For more information, see Resource Tags (https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm). // Example: `{"Operations": {"CostCenter": "42"}}` DefinedTags map[string]map[string]interface{} `mandatory:"false" json:"definedTags"` // Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. // For more information, see Resource Tags (https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm). // Example: `{"Department": "Finance"}` FreeformTags map[string]string `mandatory:"false" json:"freeformTags"` // The key's protection mode indicates how the key persists and where cryptographic operations that use the key are performed. // A protection mode of `HSM` means that the key persists on a hardware security module (HSM) and all cryptographic operations are performed inside // the HSM. A protection mode of `SOFTWARE` means that the key persists on the server, protected by the vault's RSA wrapping key which persists // on the HSM. All cryptographic operations that use a key with a protection mode of `SOFTWARE` are performed on the server. By default, // a key's protection mode is set to `HSM`. You can't change a key's protection mode after the key is created or imported. ProtectionMode KeyProtectionModeEnum `mandatory:"false" json:"protectionMode,omitempty"` // An optional property indicating when to delete the key, expressed in RFC 3339 (https://tools.ietf.org/html/rfc3339) timestamp format. // Example: `2019-04-03T21:10:29.600Z` TimeOfDeletion *common.SDKTime `mandatory:"false" json:"timeOfDeletion"` // The OCID of the key from which this key was restored. RestoredFromKeyId *string `mandatory:"false" json:"restoredFromKeyId"` ReplicaDetails *KeyReplicaDetails `mandatory:"false" json:"replicaDetails"` IsPrimary *bool `mandatory:"false" json:"isPrimary"` } func (m Key) String() string { return common.PointerString(m) } // ValidateEnumValue returns an error when providing an unsupported enum value // This function is being called during constructing API request process // Not recommended for calling this function directly func (m Key) ValidateEnumValue() (bool, error) { errMessage := []string{} if _, ok := GetMappingKeyLifecycleStateEnum(string(m.LifecycleState)); !ok && m.LifecycleState != "" { errMessage = append(errMessage, fmt.Sprintf("unsupported enum value for LifecycleState: %s. Supported values are: %s.", m.LifecycleState, strings.Join(GetKeyLifecycleStateEnumStringValues(), ","))) } if _, ok := GetMappingKeyProtectionModeEnum(string(m.ProtectionMode)); !ok && m.ProtectionMode != "" { errMessage = append(errMessage, fmt.Sprintf("unsupported enum value for ProtectionMode: %s. 
Supported values are: %s.", m.ProtectionMode, strings.Join(GetKeyProtectionModeEnumStringValues(), ","))) } if len(errMessage) > 0 { return true, fmt.Errorf(strings.Join(errMessage, "\n")) } return false, nil } // KeyProtectionModeEnum Enum with underlying type: string type KeyProtectionModeEnum string // Set of constants representing the allowable values for KeyProtectionModeEnum const ( KeyProtectionModeHsm KeyProtectionModeEnum = "HSM" KeyProtectionModeSoftware KeyProtectionModeEnum = "SOFTWARE" ) var mappingKeyProtectionModeEnum = map[string]KeyProtectionModeEnum{ "HSM": KeyProtectionModeHsm, "SOFTWARE": KeyProtectionModeSoftware, } var mappingKeyProtectionModeEnumLowerCase = map[string]KeyProtectionModeEnum{ "hsm": KeyProtectionModeHsm, "software": KeyProtectionModeSoftware, } // GetKeyProtectionModeEnumValues Enumerates the set of values for KeyProtectionModeEnum func GetKeyProtectionModeEnumValues() []KeyProtectionModeEnum { values := make([]KeyProtectionModeEnum, 0) for _, v := range mappingKeyProtectionModeEnum { values = append(values, v) } return values } // GetKeyProtectionModeEnumStringValues Enumerates the set of values in String for KeyProtectionModeEnum func GetKeyProtectionModeEnumStringValues() []string { return []string{ "HSM", "SOFTWARE", } } // GetMappingKeyProtectionModeEnum performs case Insensitive comparison on enum value and return the desired enum func GetMappingKeyProtectionModeEnum(val string) (KeyProtectionModeEnum, bool) { enum, ok := mappingKeyProtectionModeEnumLowerCase[strings.ToLower(val)] return enum, ok } // KeyLifecycleStateEnum Enum with underlying type: string type KeyLifecycleStateEnum string // Set of constants representing the allowable values for KeyLifecycleStateEnum const ( KeyLifecycleStateCreating KeyLifecycleStateEnum = "CREATING" KeyLifecycleStateEnabling KeyLifecycleStateEnum = "ENABLING" KeyLifecycleStateEnabled KeyLifecycleStateEnum = "ENABLED" KeyLifecycleStateDisabling KeyLifecycleStateEnum = "DISABLING" KeyLifecycleStateDisabled KeyLifecycleStateEnum = "DISABLED" KeyLifecycleStateDeleting KeyLifecycleStateEnum = "DELETING" KeyLifecycleStateDeleted KeyLifecycleStateEnum = "DELETED" KeyLifecycleStatePendingDeletion KeyLifecycleStateEnum = "PENDING_DELETION" KeyLifecycleStateSchedulingDeletion KeyLifecycleStateEnum = "SCHEDULING_DELETION" KeyLifecycleStateCancellingDeletion KeyLifecycleStateEnum = "CANCELLING_DELETION" KeyLifecycleStateUpdating KeyLifecycleStateEnum = "UPDATING" KeyLifecycleStateBackupInProgress KeyLifecycleStateEnum = "BACKUP_IN_PROGRESS" KeyLifecycleStateRestoring KeyLifecycleStateEnum = "RESTORING" ) var mappingKeyLifecycleStateEnum = map[string]KeyLifecycleStateEnum{ "CREATING": KeyLifecycleStateCreating, "ENABLING": KeyLifecycleStateEnabling, "ENABLED": KeyLifecycleStateEnabled, "DISABLING": KeyLifecycleStateDisabling, "DISABLED": KeyLifecycleStateDisabled, "DELETING": KeyLifecycleStateDeleting, "DELETED": KeyLifecycleStateDeleted, "PENDING_DELETION": KeyLifecycleStatePendingDeletion, "SCHEDULING_DELETION": KeyLifecycleStateSchedulingDeletion, "CANCELLING_DELETION": KeyLifecycleStateCancellingDeletion, "UPDATING": KeyLifecycleStateUpdating, "BACKUP_IN_PROGRESS": KeyLifecycleStateBackupInProgress, "RESTORING": KeyLifecycleStateRestoring, } var mappingKeyLifecycleStateEnumLowerCase = map[string]KeyLifecycleStateEnum{ "creating": KeyLifecycleStateCreating, "enabling": KeyLifecycleStateEnabling, "enabled": KeyLifecycleStateEnabled, "disabling": KeyLifecycleStateDisabling, "disabled": KeyLifecycleStateDisabled, 
"deleting": KeyLifecycleStateDeleting, "deleted": KeyLifecycleStateDeleted, "pending_deletion": KeyLifecycleStatePendingDeletion, "scheduling_deletion": KeyLifecycleStateSchedulingDeletion, "cancelling_deletion": KeyLifecycleStateCancellingDeletion, "updating": KeyLifecycleStateUpdating, "backup_in_progress": KeyLifecycleStateBackupInProgress, "restoring": KeyLifecycleStateRestoring, } // GetKeyLifecycleStateEnumValues Enumerates the set of values for KeyLifecycleStateEnum func GetKeyLifecycleStateEnumValues() []KeyLifecycleStateEnum { values := make([]KeyLifecycleStateEnum, 0) for _, v := range mappingKeyLifecycleStateEnum { values = append(values, v) } return values } // GetKeyLifecycleStateEnumStringValues Enumerates the set of values in String for KeyLifecycleStateEnum func GetKeyLifecycleStateEnumStringValues() []string { return []string{ "CREATING", "ENABLING", "ENABLED", "DISABLING", "DISABLED", "DELETING", "DELETED", "PENDING_DELETION", "SCHEDULING_DELETION", "CANCELLING_DELETION", "UPDATING", "BACKUP_IN_PROGRESS", "RESTORING", } } // GetMappingKeyLifecycleStateEnum performs case Insensitive comparison on enum value and return the desired enum func GetMappingKeyLifecycleStateEnum(val string) (KeyLifecycleStateEnum, bool)
{ enum, ok := mappingKeyLifecycleStateEnumLowerCase[strings.ToLower(val)] return enum, ok }
identifier_body
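The key.go record above keeps two maps per enum, one keyed by the canonical strings and one keyed by their lower-cased forms, so GetMappingKeyProtectionModeEnum can do a case-insensitive lookup with a single strings.ToLower. A trimmed-down, standalone Go sketch of that pattern follows; the names are invented for illustration, and the real types live in the OCI keymanagement package.

```go
package main

import (
	"fmt"
	"strings"
)

// ProtectionMode is a string-backed enum, mirroring the style of the record above.
type ProtectionMode string

const (
	ProtectionModeHSM      ProtectionMode = "HSM"
	ProtectionModeSoftware ProtectionMode = "SOFTWARE"
)

// protectionModeByLower is keyed by lower-cased values so lookups ignore case.
var protectionModeByLower = map[string]ProtectionMode{
	"hsm":      ProtectionModeHSM,
	"software": ProtectionModeSoftware,
}

// getMappingProtectionMode mirrors GetMappingKeyProtectionModeEnum: lower the
// input once, then report whether it names a known enum member.
func getMappingProtectionMode(val string) (ProtectionMode, bool) {
	enum, ok := protectionModeByLower[strings.ToLower(val)]
	return enum, ok
}

func main() {
	for _, v := range []string{"hsm", "Software", "rsa"} {
		enum, ok := getMappingProtectionMode(v)
		fmt.Println(v, "->", enum, ok)
	}
}
```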
key.go
// Example: `ENABLED` LifecycleState KeyLifecycleStateEnum `mandatory:"true" json:"lifecycleState"` // The date and time the key was created, expressed in RFC 3339 (https://tools.ietf.org/html/rfc3339) timestamp format. // Example: `2018-04-03T21:10:29.600Z` TimeCreated *common.SDKTime `mandatory:"true" json:"timeCreated"` // The OCID of the vault that contains this key. VaultId *string `mandatory:"true" json:"vaultId"` // Defined tags for this resource. Each key is predefined and scoped to a namespace. // For more information, see Resource Tags (https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm). // Example: `{"Operations": {"CostCenter": "42"}}` DefinedTags map[string]map[string]interface{} `mandatory:"false" json:"definedTags"` // Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. // For more information, see Resource Tags (https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm). // Example: `{"Department": "Finance"}` FreeformTags map[string]string `mandatory:"false" json:"freeformTags"` // The key's protection mode indicates how the key persists and where cryptographic operations that use the key are performed. // A protection mode of `HSM` means that the key persists on a hardware security module (HSM) and all cryptographic operations are performed inside // the HSM. A protection mode of `SOFTWARE` means that the key persists on the server, protected by the vault's RSA wrapping key which persists // on the HSM. All cryptographic operations that use a key with a protection mode of `SOFTWARE` are performed on the server. By default, // a key's protection mode is set to `HSM`. You can't change a key's protection mode after the key is created or imported. ProtectionMode KeyProtectionModeEnum `mandatory:"false" json:"protectionMode,omitempty"` // An optional property indicating when to delete the key, expressed in RFC 3339 (https://tools.ietf.org/html/rfc3339) timestamp format. // Example: `2019-04-03T21:10:29.600Z` TimeOfDeletion *common.SDKTime `mandatory:"false" json:"timeOfDeletion"` // The OCID of the key from which this key was restored. RestoredFromKeyId *string `mandatory:"false" json:"restoredFromKeyId"` ReplicaDetails *KeyReplicaDetails `mandatory:"false" json:"replicaDetails"` IsPrimary *bool `mandatory:"false" json:"isPrimary"` } func (m Key) String() string { return common.PointerString(m) } // ValidateEnumValue returns an error when providing an unsupported enum value // This function is being called during constructing API request process // Not recommended for calling this function directly func (m Key) ValidateEnumValue() (bool, error) { errMessage := []string{} if _, ok := GetMappingKeyLifecycleStateEnum(string(m.LifecycleState)); !ok && m.LifecycleState != "" { errMessage = append(errMessage, fmt.Sprintf("unsupported enum value for LifecycleState: %s. Supported values are: %s.", m.LifecycleState, strings.Join(GetKeyLifecycleStateEnumStringValues(), ","))) } if _, ok := GetMappingKeyProtectionModeEnum(string(m.ProtectionMode)); !ok && m.ProtectionMode != "" { errMessage = append(errMessage, fmt.Sprintf("unsupported enum value for ProtectionMode: %s. 
Supported values are: %s.", m.ProtectionMode, strings.Join(GetKeyProtectionModeEnumStringValues(), ","))) } if len(errMessage) > 0 { return true, fmt.Errorf(strings.Join(errMessage, "\n")) } return false, nil } // KeyProtectionModeEnum Enum with underlying type: string type KeyProtectionModeEnum string // Set of constants representing the allowable values for KeyProtectionModeEnum const ( KeyProtectionModeHsm KeyProtectionModeEnum = "HSM" KeyProtectionModeSoftware KeyProtectionModeEnum = "SOFTWARE" ) var mappingKeyProtectionModeEnum = map[string]KeyProtectionModeEnum{ "HSM": KeyProtectionModeHsm, "SOFTWARE": KeyProtectionModeSoftware, } var mappingKeyProtectionModeEnumLowerCase = map[string]KeyProtectionModeEnum{ "hsm": KeyProtectionModeHsm, "software": KeyProtectionModeSoftware, } // GetKeyProtectionModeEnumValues Enumerates the set of values for KeyProtectionModeEnum func GetKeyProtectionModeEnumValues() []KeyProtectionModeEnum { values := make([]KeyProtectionModeEnum, 0) for _, v := range mappingKeyProtectionModeEnum { values = append(values, v) } return values } // GetKeyProtectionModeEnumStringValues Enumerates the set of values in String for KeyProtectionModeEnum func GetKeyProtectionModeEnumStringValues() []string { return []string{ "HSM", "SOFTWARE", } } // GetMappingKeyProtectionModeEnum performs case Insensitive comparison on enum value and return the desired enum func GetMappingKeyProtectionModeEnum(val string) (KeyProtectionModeEnum, bool) { enum, ok := mappingKeyProtectionModeEnumLowerCase[strings.ToLower(val)] return enum, ok } // KeyLifecycleStateEnum Enum with underlying type: string type KeyLifecycleStateEnum string // Set of constants representing the allowable values for KeyLifecycleStateEnum const ( KeyLifecycleStateCreating KeyLifecycleStateEnum = "CREATING" KeyLifecycleStateEnabling KeyLifecycleStateEnum = "ENABLING" KeyLifecycleStateEnabled KeyLifecycleStateEnum = "ENABLED" KeyLifecycleStateDisabling KeyLifecycleStateEnum = "DISABLING" KeyLifecycleStateDisabled KeyLifecycleStateEnum = "DISABLED" KeyLifecycleStateDeleting KeyLifecycleStateEnum = "DELETING" KeyLifecycleStateDeleted KeyLifecycleStateEnum = "DELETED" KeyLifecycleStatePendingDeletion KeyLifecycleStateEnum = "PENDING_DELETION" KeyLifecycleStateSchedulingDeletion KeyLifecycleStateEnum = "SCHEDULING_DELETION" KeyLifecycleStateCancellingDeletion KeyLifecycleStateEnum = "CANCELLING_DELETION" KeyLifecycleStateUpdating KeyLifecycleStateEnum = "UPDATING" KeyLifecycleStateBackupInProgress KeyLifecycleStateEnum = "BACKUP_IN_PROGRESS" KeyLifecycleStateRestoring KeyLifecycleStateEnum = "RESTORING" ) var mappingKeyLifecycleStateEnum = map[string]KeyLifecycleStateEnum{ "CREATING": KeyLifecycleStateCreating, "ENABLING": KeyLifecycleStateEnabling, "ENABLED": KeyLifecycleStateEnabled, "DISABLING": KeyLifecycleStateDisabling, "DISABLED": KeyLifecycleStateDisabled, "DELETING": KeyLifecycleStateDeleting, "DELETED": KeyLifecycleStateDeleted, "PENDING_DELETION": KeyLifecycleStatePendingDeletion, "SCHEDULING_DELETION": KeyLifecycleStateSchedulingDeletion, "CANCELLING_DELETION": KeyLifecycleStateCancellingDeletion, "UPDATING": KeyLifecycleStateUpdating, "BACKUP_IN_PROGRESS": KeyLifecycleStateBackupInProgress, "RESTORING": KeyLifecycleStateRestoring, } var mappingKeyLifecycleStateEnumLowerCase = map[string]KeyLifecycleStateEnum{ "creating": KeyLifecycleStateCreating, "enabling": KeyLifecycleStateEnabling, "enabled": KeyLifecycleStateEnabled, "disabling": KeyLifecycleStateDisabling, "disabled": KeyLifecycleStateDisabled, 
"deleting": KeyLifecycleStateDeleting, "deleted": KeyLifecycleStateDeleted, "pending_deletion": KeyLifecycleStatePendingDeletion, "scheduling_deletion": KeyLifecycleStateSchedulingDeletion, "cancelling_deletion": KeyLifecycleStateCancellingDeletion, "updating": KeyLifecycleStateUpdating, "backup_in_progress": KeyLifecycleStateBackupInProgress, "restoring": KeyLifecycleStateRestoring, } // GetKeyLifecycleStateEnumValues Enumerates the set of values for KeyLifecycleStateEnum func GetKeyLifecycleStateEnumValues() []KeyLifecycleStateEnum { values := make([]KeyLifecycleStateEnum, 0) for _, v := range mappingKeyLifecycleStateEnum { values = append(values, v) } return values } // GetKeyLifecycleStateEnumStringValues Enumerates the set of values in String for KeyLifecycleStateEnum func GetKeyLifecycleStateEnumStringValues() []string { return []string{ "CREATING", "ENABLING", "ENABLED", "DISABLING", "DISABLED", "DELETING", "DELETED", "PENDING_DELETION", "SCHEDULING_DELETION", "CANCELLING_DELETION", "UPDATING", "BACKUP_IN_PROGRESS", "RESTORING", } } // GetMappingKeyLifecycleStateEnum performs case Insensitive comparison on enum value and return the desired enum func
GetMappingKeyLifecycleStateEnum
identifier_name
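The Key model in this record declares every field as a pointer, including optional ones such as IsPrimary *bool. With encoding/json, that choice lets a caller distinguish a field absent from the payload (nil) from one present with its zero value. The sketch below demonstrates that behaviour with an invented struct and payloads; it is a plausible reading of the design, not SDK documentation.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// keySketch is a hypothetical two-field stand-in for the Key model above,
// using pointer fields the same way.
type keySketch struct {
	DisplayName *string `json:"displayName"`
	IsPrimary   *bool   `json:"isPrimary"`
}

func main() {
	var withField, withoutField keySketch
	_ = json.Unmarshal([]byte(`{"displayName":"k1","isPrimary":false}`), &withField)
	_ = json.Unmarshal([]byte(`{"displayName":"k2"}`), &withoutField)

	// Present with value false vs. omitted entirely: only pointers can tell these apart.
	fmt.Println("isPrimary present:", withField.IsPrimary != nil)    // true
	fmt.Println("isPrimary present:", withoutField.IsPrimary != nil) // false
}
```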
key.go
secrets, see the Vault Service // Secret Management API. For the API for retrieving secrets, see the Vault Service Secret Retrieval API.) // package keymanagement import ( "fmt" "github.com/oracle/oci-go-sdk/v60/common" "strings" ) // Key The representation of Key type Key struct { // The OCID of the compartment that contains this master encryption key. CompartmentId *string `mandatory:"true" json:"compartmentId"` // The OCID of the key version used in cryptographic operations. During key rotation, the service might be // in a transitional state where this or a newer key version are used intermittently. The `currentKeyVersion` // property is updated when the service is guaranteed to use the new key version for all subsequent encryption operations. CurrentKeyVersion *string `mandatory:"true" json:"currentKeyVersion"` // A user-friendly name for the key. It does not have to be unique, and it is changeable. // Avoid entering confidential information. DisplayName *string `mandatory:"true" json:"displayName"` // The OCID of the key. Id *string `mandatory:"true" json:"id"` KeyShape *KeyShape `mandatory:"true" json:"keyShape"` // The key's current lifecycle state. // Example: `ENABLED` LifecycleState KeyLifecycleStateEnum `mandatory:"true" json:"lifecycleState"` // The date and time the key was created, expressed in RFC 3339 (https://tools.ietf.org/html/rfc3339) timestamp format. // Example: `2018-04-03T21:10:29.600Z` TimeCreated *common.SDKTime `mandatory:"true" json:"timeCreated"` // The OCID of the vault that contains this key. VaultId *string `mandatory:"true" json:"vaultId"` // Defined tags for this resource. Each key is predefined and scoped to a namespace. // For more information, see Resource Tags (https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm). // Example: `{"Operations": {"CostCenter": "42"}}` DefinedTags map[string]map[string]interface{} `mandatory:"false" json:"definedTags"` // Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. // For more information, see Resource Tags (https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm). // Example: `{"Department": "Finance"}` FreeformTags map[string]string `mandatory:"false" json:"freeformTags"` // The key's protection mode indicates how the key persists and where cryptographic operations that use the key are performed. // A protection mode of `HSM` means that the key persists on a hardware security module (HSM) and all cryptographic operations are performed inside // the HSM. A protection mode of `SOFTWARE` means that the key persists on the server, protected by the vault's RSA wrapping key which persists // on the HSM. All cryptographic operations that use a key with a protection mode of `SOFTWARE` are performed on the server. By default, // a key's protection mode is set to `HSM`. You can't change a key's protection mode after the key is created or imported. ProtectionMode KeyProtectionModeEnum `mandatory:"false" json:"protectionMode,omitempty"` // An optional property indicating when to delete the key, expressed in RFC 3339 (https://tools.ietf.org/html/rfc3339) timestamp format. // Example: `2019-04-03T21:10:29.600Z` TimeOfDeletion *common.SDKTime `mandatory:"false" json:"timeOfDeletion"` // The OCID of the key from which this key was restored. 
RestoredFromKeyId *string `mandatory:"false" json:"restoredFromKeyId"` ReplicaDetails *KeyReplicaDetails `mandatory:"false" json:"replicaDetails"` IsPrimary *bool `mandatory:"false" json:"isPrimary"` } func (m Key) String() string { return common.PointerString(m) } // ValidateEnumValue returns an error when providing an unsupported enum value // This function is being called during constructing API request process // Not recommended for calling this function directly func (m Key) ValidateEnumValue() (bool, error) { errMessage := []string{}
if _, ok := GetMappingKeyLifecycleStateEnum(string(m.LifecycleState)); !ok && m.LifecycleState != "" { errMessage = append(errMessage, fmt.Sprintf("unsupported enum value for LifecycleState: %s. Supported values are: %s.", m.LifecycleState, strings.Join(GetKeyLifecycleStateEnumStringValues(), ","))) } if _, ok := GetMappingKeyProtectionModeEnum(string(m.ProtectionMode)); !ok && m.ProtectionMode != "" { errMessage = append(errMessage, fmt.Sprintf("unsupported enum value for ProtectionMode: %s. Supported values are: %s.", m.ProtectionMode, strings.Join(GetKeyProtectionModeEnumStringValues(), ","))) } if len(errMessage) > 0 { return true, fmt.Errorf(strings.Join(errMessage, "\n")) } return false, nil } // KeyProtectionModeEnum Enum with underlying type: string type KeyProtectionModeEnum string // Set of constants representing the allowable values for KeyProtectionModeEnum const ( KeyProtectionModeHsm KeyProtectionModeEnum = "HSM" KeyProtectionModeSoftware KeyProtectionModeEnum = "SOFTWARE" ) var mappingKeyProtectionModeEnum = map[string]KeyProtectionModeEnum{ "HSM": KeyProtectionModeHsm, "SOFTWARE": KeyProtectionModeSoftware, } var mappingKeyProtectionModeEnumLowerCase = map[string]KeyProtectionModeEnum{ "hsm": KeyProtectionModeHsm, "software": KeyProtectionModeSoftware, } // GetKeyProtectionModeEnumValues Enumerates the set of values for KeyProtectionModeEnum func GetKeyProtectionModeEnumValues() []KeyProtectionModeEnum { values := make([]KeyProtectionModeEnum, 0) for _, v := range mappingKeyProtectionModeEnum { values = append(values, v) } return values } // GetKeyProtectionModeEnumStringValues Enumerates the set of values in String for KeyProtectionModeEnum func GetKeyProtectionModeEnumStringValues() []string { return []string{ "HSM", "SOFTWARE", } } // GetMappingKeyProtectionModeEnum performs case Insensitive comparison on enum value and return the desired enum func GetMappingKeyProtectionModeEnum(val string) (KeyProtectionModeEnum, bool) { enum, ok := mappingKeyProtectionModeEnumLowerCase[strings.ToLower(val)] return enum, ok } // KeyLifecycleStateEnum Enum with underlying type: string type KeyLifecycleStateEnum string // Set of constants representing the allowable values for KeyLifecycleStateEnum const ( KeyLifecycleStateCreating KeyLifecycleStateEnum = "CREATING" KeyLifecycleStateEnabling KeyLifecycleStateEnum = "ENABLING" KeyLifecycleStateEnabled KeyLifecycleStateEnum = "ENABLED" KeyLifecycleStateDisabling KeyLifecycleStateEnum = "DISABLING" KeyLifecycleStateDisabled KeyLifecycleStateEnum = "DISABLED" KeyLifecycleStateDeleting KeyLifecycleStateEnum = "DELETING" KeyLifecycleStateDeleted KeyLifecycleStateEnum = "DELETED" KeyLifecycleStatePendingDeletion KeyLifecycleStateEnum = "PENDING_DELETION" KeyLifecycleStateSchedulingDeletion KeyLifecycleStateEnum = "SCHEDULING_DELETION" KeyLifecycleStateCancellingDeletion KeyLifecycleStateEnum = "CANCELLING_DELETION" KeyLifecycleStateUpdating KeyLifecycleStateEnum = "UPDATING" KeyLifecycleStateBackupInProgress KeyLifecycleStateEnum = "BACKUP_IN_PROGRESS" KeyLifecycleStateRestoring KeyLifecycleStateEnum = "RESTORING" ) var mappingKeyLifecycleStateEnum = map[string]KeyLifecycleStateEnum{ "CREATING": KeyLifecycleStateCreating, "ENABLING": KeyLifecycleStateEnabling, "ENABLED": KeyLifecycleStateEnabled, "DISABLING": KeyLifecycleStateDisabling, "DISABLED": KeyLifecycleStateDisabled, "DELETING": KeyLifecycleStateDeleting, "DELETED": KeyLifecycleStateDeleted, "PENDING_DELETION": KeyLifecycleStatePendingDeletion, "SCHEDULING_DELETION": 
KeyLifecycleStateSchedulingDeletion, "CANCELLING_DELETION": KeyLifecycleStateCancellingDeletion, "UPDATING": KeyLifecycleStateUpdating, "BACKUP_IN_PROGRESS": KeyLifecycleStateBackupInProgress, "RESTORING": KeyLifecycleStateRestoring, } var mappingKeyLifecycleStateEnumLowerCase = map[string]KeyLifecycleStateEnum{ "creating": KeyLifecycleStateCreating, "enabling": KeyLifecycleStateEnabling, "enabled": KeyLifecycleStateEnabled, "disabling": KeyLifecycleStateDisabling, "disabled": KeyLifecycleStateDisabled, "deleting": KeyLifecycleStateDeleting, "deleted": KeyLifecycleStateDeleted, "pending_deletion": KeyLifecycleStatePendingDeletion, "scheduling_deletion": KeyLifecycleStateSchedulingDeletion, "cancelling_deletion": KeyLifecycleStateCancelling
random_line_split
key.go
, see the Vault Service // Secret Management API. For the API for retrieving secrets, see the Vault Service Secret Retrieval API.) // package keymanagement import ( "fmt" "github.com/oracle/oci-go-sdk/v60/common" "strings" ) // Key The representation of Key type Key struct { // The OCID of the compartment that contains this master encryption key. CompartmentId *string `mandatory:"true" json:"compartmentId"` // The OCID of the key version used in cryptographic operations. During key rotation, the service might be // in a transitional state where this or a newer key version are used intermittently. The `currentKeyVersion` // property is updated when the service is guaranteed to use the new key version for all subsequent encryption operations. CurrentKeyVersion *string `mandatory:"true" json:"currentKeyVersion"` // A user-friendly name for the key. It does not have to be unique, and it is changeable. // Avoid entering confidential information. DisplayName *string `mandatory:"true" json:"displayName"` // The OCID of the key. Id *string `mandatory:"true" json:"id"` KeyShape *KeyShape `mandatory:"true" json:"keyShape"` // The key's current lifecycle state. // Example: `ENABLED` LifecycleState KeyLifecycleStateEnum `mandatory:"true" json:"lifecycleState"` // The date and time the key was created, expressed in RFC 3339 (https://tools.ietf.org/html/rfc3339) timestamp format. // Example: `2018-04-03T21:10:29.600Z` TimeCreated *common.SDKTime `mandatory:"true" json:"timeCreated"` // The OCID of the vault that contains this key. VaultId *string `mandatory:"true" json:"vaultId"` // Defined tags for this resource. Each key is predefined and scoped to a namespace. // For more information, see Resource Tags (https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm). // Example: `{"Operations": {"CostCenter": "42"}}` DefinedTags map[string]map[string]interface{} `mandatory:"false" json:"definedTags"` // Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. // For more information, see Resource Tags (https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm). // Example: `{"Department": "Finance"}` FreeformTags map[string]string `mandatory:"false" json:"freeformTags"` // The key's protection mode indicates how the key persists and where cryptographic operations that use the key are performed. // A protection mode of `HSM` means that the key persists on a hardware security module (HSM) and all cryptographic operations are performed inside // the HSM. A protection mode of `SOFTWARE` means that the key persists on the server, protected by the vault's RSA wrapping key which persists // on the HSM. All cryptographic operations that use a key with a protection mode of `SOFTWARE` are performed on the server. By default, // a key's protection mode is set to `HSM`. You can't change a key's protection mode after the key is created or imported. ProtectionMode KeyProtectionModeEnum `mandatory:"false" json:"protectionMode,omitempty"` // An optional property indicating when to delete the key, expressed in RFC 3339 (https://tools.ietf.org/html/rfc3339) timestamp format. // Example: `2019-04-03T21:10:29.600Z` TimeOfDeletion *common.SDKTime `mandatory:"false" json:"timeOfDeletion"` // The OCID of the key from which this key was restored. 
RestoredFromKeyId *string `mandatory:"false" json:"restoredFromKeyId"` ReplicaDetails *KeyReplicaDetails `mandatory:"false" json:"replicaDetails"` IsPrimary *bool `mandatory:"false" json:"isPrimary"` } func (m Key) String() string { return common.PointerString(m) } // ValidateEnumValue returns an error when providing an unsupported enum value // This function is being called during constructing API request process // Not recommended for calling this function directly func (m Key) ValidateEnumValue() (bool, error) { errMessage := []string{} if _, ok := GetMappingKeyLifecycleStateEnum(string(m.LifecycleState)); !ok && m.LifecycleState != "" { errMessage = append(errMessage, fmt.Sprintf("unsupported enum value for LifecycleState: %s. Supported values are: %s.", m.LifecycleState, strings.Join(GetKeyLifecycleStateEnumStringValues(), ","))) } if _, ok := GetMappingKeyProtectionModeEnum(string(m.ProtectionMode)); !ok && m.ProtectionMode != ""
if len(errMessage) > 0 { return true, fmt.Errorf(strings.Join(errMessage, "\n")) } return false, nil } // KeyProtectionModeEnum Enum with underlying type: string type KeyProtectionModeEnum string // Set of constants representing the allowable values for KeyProtectionModeEnum const ( KeyProtectionModeHsm KeyProtectionModeEnum = "HSM" KeyProtectionModeSoftware KeyProtectionModeEnum = "SOFTWARE" ) var mappingKeyProtectionModeEnum = map[string]KeyProtectionModeEnum{ "HSM": KeyProtectionModeHsm, "SOFTWARE": KeyProtectionModeSoftware, } var mappingKeyProtectionModeEnumLowerCase = map[string]KeyProtectionModeEnum{ "hsm": KeyProtectionModeHsm, "software": KeyProtectionModeSoftware, } // GetKeyProtectionModeEnumValues Enumerates the set of values for KeyProtectionModeEnum func GetKeyProtectionModeEnumValues() []KeyProtectionModeEnum { values := make([]KeyProtectionModeEnum, 0) for _, v := range mappingKeyProtectionModeEnum { values = append(values, v) } return values } // GetKeyProtectionModeEnumStringValues Enumerates the set of values in String for KeyProtectionModeEnum func GetKeyProtectionModeEnumStringValues() []string { return []string{ "HSM", "SOFTWARE", } } // GetMappingKeyProtectionModeEnum performs case Insensitive comparison on enum value and return the desired enum func GetMappingKeyProtectionModeEnum(val string) (KeyProtectionModeEnum, bool) { enum, ok := mappingKeyProtectionModeEnumLowerCase[strings.ToLower(val)] return enum, ok } // KeyLifecycleStateEnum Enum with underlying type: string type KeyLifecycleStateEnum string // Set of constants representing the allowable values for KeyLifecycleStateEnum const ( KeyLifecycleStateCreating KeyLifecycleStateEnum = "CREATING" KeyLifecycleStateEnabling KeyLifecycleStateEnum = "ENABLING" KeyLifecycleStateEnabled KeyLifecycleStateEnum = "ENABLED" KeyLifecycleStateDisabling KeyLifecycleStateEnum = "DISABLING" KeyLifecycleStateDisabled KeyLifecycleStateEnum = "DISABLED" KeyLifecycleStateDeleting KeyLifecycleStateEnum = "DELETING" KeyLifecycleStateDeleted KeyLifecycleStateEnum = "DELETED" KeyLifecycleStatePendingDeletion KeyLifecycleStateEnum = "PENDING_DELETION" KeyLifecycleStateSchedulingDeletion KeyLifecycleStateEnum = "SCHEDULING_DELETION" KeyLifecycleStateCancellingDeletion KeyLifecycleStateEnum = "CANCELLING_DELETION" KeyLifecycleStateUpdating KeyLifecycleStateEnum = "UPDATING" KeyLifecycleStateBackupInProgress KeyLifecycleStateEnum = "BACKUP_IN_PROGRESS" KeyLifecycleStateRestoring KeyLifecycleStateEnum = "RESTORING" ) var mappingKeyLifecycleStateEnum = map[string]KeyLifecycleStateEnum{ "CREATING": KeyLifecycleStateCreating, "ENABLING": KeyLifecycleStateEnabling, "ENABLED": KeyLifecycleStateEnabled, "DISABLING": KeyLifecycleStateDisabling, "DISABLED": KeyLifecycleStateDisabled, "DELETING": KeyLifecycleStateDeleting, "DELETED": KeyLifecycleStateDeleted, "PENDING_DELETION": KeyLifecycleStatePendingDeletion, "SCHEDULING_DELETION": KeyLifecycleStateSchedulingDeletion, "CANCELLING_DELETION": KeyLifecycleStateCancellingDeletion, "UPDATING": KeyLifecycleStateUpdating, "BACKUP_IN_PROGRESS": KeyLifecycleStateBackupInProgress, "RESTORING": KeyLifecycleStateRestoring, } var mappingKeyLifecycleStateEnumLowerCase = map[string]KeyLifecycleStateEnum{ "creating": KeyLifecycleStateCreating, "enabling": KeyLifecycleStateEnabling, "enabled": KeyLifecycleStateEnabled, "disabling": KeyLifecycleStateDisabling, "disabled": KeyLifecycleStateDisabled, "deleting": KeyLifecycleStateDeleting, "deleted": KeyLifecycleStateDeleted, "pending_deletion": 
KeyLifecycleStatePendingDeletion, "scheduling_deletion": KeyLifecycleStateSchedulingDeletion, "cancelling_deletion": KeyLifecycleStateCanc
{ errMessage = append(errMessage, fmt.Sprintf("unsupported enum value for ProtectionMode: %s. Supported values are: %s.", m.ProtectionMode, strings.Join(GetKeyProtectionModeEnumStringValues(), ","))) }
conditional_block
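ValidateEnumValue in these key.go records follows a collect-then-join pattern: each unsupported enum value appends one message, and a single error is built only if anything was collected, with the bool return signalling that validation failed. A standalone Go sketch of that pattern with invented field names and an invented allowed set:

```go
package main

import (
	"fmt"
	"strings"
)

// validateSketch mirrors the collect-then-join style of ValidateEnumValue above:
// gather one message per bad value, then build a single error at the end.
func validateSketch(lifecycleState, protectionMode string) (bool, error) {
	allowedStates := map[string]bool{"ENABLED": true, "DISABLED": true}
	var msgs []string
	if lifecycleState != "" && !allowedStates[lifecycleState] {
		msgs = append(msgs, fmt.Sprintf("unsupported LifecycleState: %s", lifecycleState))
	}
	if protectionMode != "" && protectionMode != "HSM" && protectionMode != "SOFTWARE" {
		msgs = append(msgs, fmt.Sprintf("unsupported ProtectionMode: %s", protectionMode))
	}
	if len(msgs) > 0 {
		// The original passes the joined string straight to fmt.Errorf;
		// "%s" is used here to keep go vet quiet.
		return true, fmt.Errorf("%s", strings.Join(msgs, "\n"))
	}
	return false, nil
}

func main() {
	bad, err := validateSketch("SLEEPING", "RSA")
	fmt.Println(bad, err)
}
```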
ddd.go
// Else, we only have 1 location part, we cant reduce this, so return the initial location return position_location } } } else if move_y != 0 { last_part := parts[len(parts)-1] last_part_int, _ := strconv.Atoi(last_part) if move_y == 1 { fmt.Printf("DDD Move: DOWN\n") // Moving down, increment the last_part_int last_part_int++ parts[len(parts)-1] = strconv.Itoa(last_part_int) return strings.Join(parts, ".") } else { fmt.Printf("DDD Move: UP\n") // Moving up, decrement the last_part_int last_part_int-- //if last_part_int < 0 { // last_part_int = 0 //} parts[len(parts)-1] = strconv.Itoa(last_part_int) return strings.Join(parts, ".") } } fmt.Printf("DDD Move: No Change\n") // No change in position, return the same string we received return position_location } func DddGet(position_location string, data_location string, ddd_data map[string]interface{}, udn_data map[string]interface{}) interface{} { // Get the DDD Node that describes this position //ddd_node := DddGetNode(position_location, ddd_data, udn_data) //TODO(g): SECOND! We know the DDD information, so we navigate the same way we did DDD, but we get the data // // What if it isnt available? We return an error. How? // // ?? How ?? // ??? // // Copy the looping code into all the functions, dont worry about generalizing initially, just get it working. // result := 1 return result } func _DddGetNodeCurrent(cur_data map[string]interface{}, cur_record_data interface{}, cur_pos int, processed_parts []int, cur_parts []string) (string, map[string]interface{}, interface{}) { if cur_data["keydict"] != nil { // The cur_pos will be selected based on the sorted values, because they are map-keys, they are out of order. Once sorted, they are accessed as an array index keys := MapKeys(cur_data["keydict"].(map[string]interface{})) fmt.Printf("DddGetNodeCurrent: keydict: Keys: %v\n", keys) // We didnt find it, so return nil if cur_pos >= len(keys) || cur_pos < 0 { return "nil", nil, nil } selected_key := keys[cur_pos] fmt.Printf("DddGetNodeCurrent: keydict: Selected Key: %s\n", selected_key) result_cur_data := cur_data["keydict"].(map[string]interface{})[selected_key].(map[string]interface{}) cur_record_data_map := GetResult(cur_record_data, type_map).(map[string]interface{}) result_cur_record_data := make(map[string]interface{}) if cur_record_data_map[selected_key] != nil { result_cur_record_data = GetResult(cur_record_data_map[selected_key], type_map).(map[string]interface{}) } return fmt.Sprintf("Key: %s", selected_key), result_cur_data, result_cur_record_data } else if cur_data["rowdict"] != nil { // The rowdict is inside a list, but must be further selected based on the selection field, which will determine the node //TODO(g): ... 
return "RowDict", cur_data, cur_record_data } else if cur_data["list"] != nil { fmt.Printf("DDDGET:LIST: %T\n", cur_data["list"]) cur_data_list := cur_data["list"].([]interface{}) // Using the cur_pos as the index offset, this works up until the "variadic" node (if present) if cur_pos >= 0 && cur_pos < len(cur_data_list) { result_cur_data := cur_data_list[cur_pos].(map[string]interface{}) var result_cur_record_data interface{} cur_record_data_array := GetResult(cur_record_data, type_array).([]interface{}) if len(cur_record_data_array) > cur_pos { result_cur_record_data = cur_record_data_array[cur_pos] } else { result_cur_record_data = nil } return fmt.Sprintf("Index: %d", cur_pos), result_cur_data, result_cur_record_data } else { return "nil", nil, nil } } else if cur_data["type"] != nil { // This is a raw data node, and should not have any indexing, only "0" for it's location position if cur_pos == 0 { return "TBD: Get Label", cur_data, cur_record_data } else { return "nil", nil, nil } } else if cur_data["variadic"] != nil { // I think I have to backtrack to a previous node then? Parent node? if cur_pos == 0 { return fmt.Sprintf("Variadic: %d", cur_pos), cur_data, cur_record_data } else { return "nil", nil, nil } } else { //TODO(g): Replace this panic with a non-fatal error... But the DDD is bad, so report it? //panic(fmt.Sprintf("Unknown DDD node: %v", cur_data)) return "nil", nil, nil } return "Unknown", cur_data, cur_record_data } func DddGetNode(position_location string, ddd_data map[string]interface{}, data_record interface{}, udn_data map[string]interface{}) (string, map[string]interface{}, interface{}) { cur_parts := strings.Split(position_location, ".") cur_label := "" fmt.Printf("DDD Get Node: Parts: %s: %v\n", position_location, cur_parts) // Current position starts from ddd_data, and then we navigate it, and return it when we find the node cur_data := ddd_data cur_record_data := data_record processed_parts := make([]int, 0) // The first "0" is always "0", and is the base cur_data, so let's pop it off if len(cur_parts) > 1 { // Add the part we just processed to our processed_parts slice to keep track of them cur_pos, _ := strconv.Atoi(cur_parts[0]) processed_parts = append(processed_parts, cur_pos) fmt.Printf("DddGetNode: Removing first part: %v\n", cur_parts) cur_parts = cur_parts[1:len(cur_parts)] fmt.Printf("DddGetNode: Removed first part: %v\n", cur_parts) } else { if position_location == "0" { // There are no other parts, so we have the data fmt.Printf("DddGetNode: First part is '0': %s\n", position_location) return "The Beginninging", cur_data, cur_record_data } else { // Asking for data which cannot exist. The first part can only be 0 fmt.Printf("DddGetNode: First part is only part, and isnt '0': %s\n", position_location) return "The Somethingelseinging", nil, nil } } // As long as we still have cur_parts, keep going. 
If we dont return in this block, we will have an empty result for len(cur_parts) > 0 { cur_pos, _ := strconv.Atoi(cur_parts[0]) fmt.Printf("DDD Move: Step: Parts: %v Current: %d Cur Node: %s Cursor Data: %s\n", cur_parts, cur_pos, SnippetData(cur_data, 80), SnippetData(cur_record_data, 80)) cur_label, cur_data, cur_record_data = _DddGetNodeCurrent(cur_data, cur_record_data, cur_pos, processed_parts, cur_parts) // Add the part we just processed to our processed_parts slice to keep track of them processed_parts = append(processed_parts, cur_pos) // Pop off the first element, so we keep going if len(cur_parts) > 1 { cur_parts = cur_parts[1:len(cur_parts)] } else { cur_parts = make([]string, 0) } // If
{ //NOTE(g): This function doesnt check if the new position is valid, that is done by DddGet() which returns the DDD info at the current position (if valid, or nil parts := strings.Split(position_location, ".") fmt.Printf("DDD Move: Parts: %v\n", parts) // Only allow X or Y movement, not both. This isnt a video game. if move_x != 0 { if move_x == 1 { fmt.Printf("DDD Move: RIGHT\n") // Moving to the right, we just add a .0 to the current location return fmt.Sprintf("%s.0", position_location) } else { fmt.Printf("DDD Move: LEFT\n") if len(parts) > 1 { parts = parts[0:len(parts)-1] return strings.Join(parts, ".") } else { fmt.Printf("DDD Move: Cant move left\n")
identifier_body
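The DddMove logic visible in this ddd.go record treats a position as a dot-separated path: moving right appends ".0" (descend to the first child), moving left drops the last segment (back to the parent), and moving up or down decrements or increments the last segment. Below is a standalone Go sketch re-deriving that behaviour; it omits the validation that DddGet applies to the resulting position afterwards.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// movePosition navigates a dotted position string the way DddMove above does.
// moveX and moveY are expected to be -1, 0, or 1, and only one axis is honoured.
func movePosition(position string, moveX, moveY int) string {
	parts := strings.Split(position, ".")
	switch {
	case moveX == 1:
		return position + ".0" // right: descend to first child
	case moveX == -1:
		if len(parts) > 1 {
			return strings.Join(parts[:len(parts)-1], ".") // left: back to parent
		}
		return position // cannot go left of the root
	case moveY != 0:
		last, _ := strconv.Atoi(parts[len(parts)-1])
		parts[len(parts)-1] = strconv.Itoa(last + moveY) // up/down: previous/next sibling
		return strings.Join(parts, ".")
	}
	return position
}

func main() {
	fmt.Println(movePosition("0.1", 1, 0))    // 0.1.0
	fmt.Println(movePosition("0.1.0", -1, 0)) // 0.1
	fmt.Println(movePosition("0.1", 0, 1))    // 0.2
}
```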
ddd.go
track of them processed_parts = append(processed_parts, cur_pos) // Pop off the first element, so we keep going if len(cur_parts) > 1 { cur_parts = cur_parts[1:len(cur_parts)] } else { cur_parts = make([]string, 0) } // If we have nothing left to process, return the result if len(cur_parts) == 0 { fmt.Printf("DddGetNode: Result: %s: Node Data: %s Cursor Data: %s\n", position_location, SnippetData(cur_data, 80), SnippetData(cur_record_data, 80)) return cur_label, cur_data, cur_record_data } else if cur_data["type"] != nil || cur_data["variadic"] != nil || cur_data["rowdict"] != nil { return cur_label, nil, nil } } // No data at this location, or we would have returned it already fmt.Printf("DddGetNode: No result, returning nil: %v\n", cur_parts) return "nil", nil, nil } func GetDddNodeSummary(cur_label string, cur_data map[string]interface{}) string { // This is our result, setting to unknown, which should never be displayed summary := "Unknown: FIX" if cur_data["keydict"] != nil { keys := MapKeys(cur_data["keydict"].(map[string]interface{})) summary = fmt.Sprintf("%s: KeyDict: %v", cur_label, strings.Join(keys, ", ")) } else if cur_data["rowdict"] != nil { keys := MapKeys(cur_data["rowdict"].(map[string]interface{})["switch_rows"].(map[string]interface{})) summary = fmt.Sprintf("%s: RowDict: Rows: %d: %v", cur_label, len(cur_data["rowdict"].(map[string]interface{})), strings.Join(keys, ", ")) } else if cur_data["list"] != nil { cur_list := cur_data["list"].([]interface{}) item_summary := make([]string, 0) for _, item := range cur_data["list"].([]interface{}) { item_summary = append(item_summary, GetDddNodeSummary("", item.(map[string]interface{}))) } item_summary_str := strings.Join(item_summary, ", ") summary = fmt.Sprintf("%s: List (%d): %s", cur_label, len(cur_list), item_summary_str) } else if cur_data["type"] != nil { summary = fmt.Sprintf("%s: Data Item: Type: %s", cur_label, cur_data["type"]) } else if cur_data["variadic"] != nil { summary = fmt.Sprintf("%s: Variadic", cur_label) } // Crop long summaries if len(summary) > 60 { summary = summary[0:60] } return summary } func GetFieldMapFromSpec(data map[string]interface{}, label string, name string) map[string]interface{} { field_map := make(map[string]interface{}) if data["type"] == "string" || data["type"] == "int" || data["type"] == "boolean" { icon := "icon-make-group" if data["icon"] != nil { icon = data["icon"].(string) } size := 12 if data["size"] != nil { size = int(data["size"].(float64)) } field_map = map[string]interface{}{ "color": "primary", "icon": icon, "info": "", "label": label, "name": name, "placeholder": "", "size": size, "type": "text", "value": "", } } return field_map } func DddRenderNode(position_location string, ddd_id int64, temp_id int64, ddd_label string, ddd_node map[string]interface{}, ddd_cursor_data interface{}) []interface{} { rows := make([]interface{}, 0) //// Add the current row, so we work with them //cur_row := make([]interface{}, 0) //rows = append(rows, cur_row) if ddd_node["type"] != nil { field_name := fmt.Sprintf("ddd_node_%s", position_location) new_html_field := GetFieldMapFromSpec(ddd_node, ddd_label, field_name) rows = AppendArray(rows, new_html_field) } else if ddd_node["keydict"] != nil { html_element_name := fmt.Sprintf("ddd_node_%s", position_location) // Keydict select fields, navs to them, so we dont have to button nav new_html_field := map[string]interface{}{ "color": "primary", "icon": "icon-make-group", "info": "", "label": ddd_label, "name": html_element_name, "placeholder": 
"", "size": "12", "type": "select", "value": "", "value_match":"select_option_match", "value_nomatch":"select_option_nomatch", "null_message": "- Select to Navigate -", "items": fmt.Sprintf("__input.%s", MapKeysToUdnMapForHtmlSelect(position_location, ddd_node["keydict"].(map[string]interface{}))), "onchange": fmt.Sprintf("$(this).closest('.ui-dialog-content').dialog('close'); RPC('/api/dwi_render_ddd', {'move_x': 0, 'move_y': 0, 'position_location': $(this).val(), 'ddd_id': %d, 'is_delete': 0, 'web_data_widget_instance_id': '{{{_id}}}', 'web_widget_instance_id': '{{{web_widget_instance_id}}}', '_web_data_widget_instance_id': 34, 'dom_target_id':'dialog_target', 'temp_id': %d})", ddd_id, temp_id), } rows = AppendArray(rows, new_html_field) } else if ddd_node["list"] != nil { map_values := make([]string, 0) for index, data := range ddd_node["list"].([]interface{}) { summary := GetDddNodeSummary(ddd_label, data.(map[string]interface{})) new_position := fmt.Sprintf("%s.%d", position_location, index) map_values = append(map_values, fmt.Sprintf("{name='%s',value='%s'}", summary, new_position)) } map_value_str := strings.Join(map_values, ",") udn_final := fmt.Sprintf("[%s]", map_value_str) html_element_name := fmt.Sprintf("ddd_node_%s", position_location) // Keydict select fields, navs to them, so we dont have to button nav new_html_field := map[string]interface{}{ "color": "primary", "icon": "icon-make-group", "info": "", "label": ddd_label, "name": html_element_name, "placeholder": "", "size": "12", "type": "select", "value": "", "value_match":"select_option_match", "value_nomatch":"select_option_nomatch", "null_message": "- Select to Navigate -", "items": fmt.Sprintf("__input.%s", udn_final), "onchange": fmt.Sprintf("$(this).closest('.ui-dialog-content').dialog('close'); RPC('/api/dwi_render_ddd', {'move_x': 0, 'move_y': 0, 'position_location': $(this).val(), 'ddd_id': %d, 'is_delete': 0, 'web_data_widget_instance_id': '{{{_id}}}', 'web_widget_instance_id': '{{{web_widget_instance_id}}}', '_web_data_widget_instance_id': 34, 'dom_target_id':'dialog_target', 'temp_id': %d})", ddd_id, temp_id), } rows = AppendArray(rows, new_html_field) } else if ddd_node["rowdict"] != nil { // Sort by rows and columns, if available, if not, sort them and put them at the end, 1 per row unsorted := make([]map[string]interface{}, 0) layout := make(map[int]map[int]map[string]interface{}) //TODO(g): We will assume data initially, so we can start up data_switch_field := "text" // Select the spec from the switch_field selected_row_dict_spec := ddd_node["rowdict"].(map[string]interface{})["switch_rows"].(map[string]interface{})[data_switch_field].(map[string]interface{}) for key, value := range selected_row_dict_spec { value_map := value.(map[string]interface{}) new_item := make(map[string]interface{}) new_item[key] = value if value_map["x"] != nil && value_map["y"] != nil
{
	// Put them in Y first, because we care about ordering by rows first, then columns once in a specific row
	if layout[int(value_map["y"].(float64))] == nil {
		layout[int(value_map["y"].(float64))] = make(map[int]map[string]interface{})
	}
	layout[int(value_map["y"].(float64))][int(value_map["x"].(float64))] = new_item
}
conditional_block
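The conditional block above (from DddRenderNode's rowdict handling) buckets each field spec into layout[y][x] so that output can be ordered by row first and by column within a row. A minimal Go sketch of that bucketing-and-walk pattern, using a made-up field struct rather than the real map[string]interface{} specs:

package main

import (
	"fmt"
	"sort"
)

func main() {
	// A made-up stand-in for the rowdict field specs; only the x/y coordinates matter here.
	type field struct {
		Name string
		X, Y int
	}
	fields := []field{{"c", 1, 1}, {"a", 0, 0}, {"b", 1, 0}}

	// Bucket by Y (row) first, then X (column), as in the block above.
	layout := map[int]map[int]field{}
	for _, f := range fields {
		if layout[f.Y] == nil {
			layout[f.Y] = map[int]field{}
		}
		layout[f.Y][f.X] = f
	}

	// Walk rows in ascending Y, then columns in ascending X within each row.
	var ys []int
	for y := range layout {
		ys = append(ys, y)
	}
	sort.Ints(ys)
	for _, y := range ys {
		var xs []int
		for x := range layout[y] {
			xs = append(xs, x)
		}
		sort.Ints(xs)
		for _, x := range xs {
			fmt.Println(layout[y][x].Name) // prints a, b, c
		}
	}
}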
ddd.go
T\n", cur_data["list"]) cur_data_list := cur_data["list"].([]interface{}) // Using the cur_pos as the index offset, this works up until the "variadic" node (if present) if cur_pos >= 0 && cur_pos < len(cur_data_list) { result_cur_data := cur_data_list[cur_pos].(map[string]interface{}) var result_cur_record_data interface{} cur_record_data_array := GetResult(cur_record_data, type_array).([]interface{}) if len(cur_record_data_array) > cur_pos { result_cur_record_data = cur_record_data_array[cur_pos] } else { result_cur_record_data = nil } return fmt.Sprintf("Index: %d", cur_pos), result_cur_data, result_cur_record_data } else { return "nil", nil, nil } } else if cur_data["type"] != nil { // This is a raw data node, and should not have any indexing, only "0" for it's location position if cur_pos == 0 { return "TBD: Get Label", cur_data, cur_record_data } else { return "nil", nil, nil } } else if cur_data["variadic"] != nil { // I think I have to backtrack to a previous node then? Parent node? if cur_pos == 0 { return fmt.Sprintf("Variadic: %d", cur_pos), cur_data, cur_record_data } else { return "nil", nil, nil } } else { //TODO(g): Replace this panic with a non-fatal error... But the DDD is bad, so report it? //panic(fmt.Sprintf("Unknown DDD node: %v", cur_data)) return "nil", nil, nil } return "Unknown", cur_data, cur_record_data } func DddGetNode(position_location string, ddd_data map[string]interface{}, data_record interface{}, udn_data map[string]interface{}) (string, map[string]interface{}, interface{}) { cur_parts := strings.Split(position_location, ".") cur_label := "" fmt.Printf("DDD Get Node: Parts: %s: %v\n", position_location, cur_parts) // Current position starts from ddd_data, and then we navigate it, and return it when we find the node cur_data := ddd_data cur_record_data := data_record processed_parts := make([]int, 0) // The first "0" is always "0", and is the base cur_data, so let's pop it off if len(cur_parts) > 1 { // Add the part we just processed to our processed_parts slice to keep track of them cur_pos, _ := strconv.Atoi(cur_parts[0]) processed_parts = append(processed_parts, cur_pos) fmt.Printf("DddGetNode: Removing first part: %v\n", cur_parts) cur_parts = cur_parts[1:len(cur_parts)] fmt.Printf("DddGetNode: Removed first part: %v\n", cur_parts) } else { if position_location == "0" { // There are no other parts, so we have the data fmt.Printf("DddGetNode: First part is '0': %s\n", position_location) return "The Beginninging", cur_data, cur_record_data } else { // Asking for data which cannot exist. The first part can only be 0 fmt.Printf("DddGetNode: First part is only part, and isnt '0': %s\n", position_location) return "The Somethingelseinging", nil, nil } } // As long as we still have cur_parts, keep going. 
If we dont return in this block, we will have an empty result for len(cur_parts) > 0 { cur_pos, _ := strconv.Atoi(cur_parts[0]) fmt.Printf("DDD Move: Step: Parts: %v Current: %d Cur Node: %s Cursor Data: %s\n", cur_parts, cur_pos, SnippetData(cur_data, 80), SnippetData(cur_record_data, 80)) cur_label, cur_data, cur_record_data = _DddGetNodeCurrent(cur_data, cur_record_data, cur_pos, processed_parts, cur_parts) // Add the part we just processed to our processed_parts slice to keep track of them processed_parts = append(processed_parts, cur_pos) // Pop off the first element, so we keep going if len(cur_parts) > 1 { cur_parts = cur_parts[1:len(cur_parts)] } else { cur_parts = make([]string, 0) } // If we have nothing left to process, return the result if len(cur_parts) == 0 { fmt.Printf("DddGetNode: Result: %s: Node Data: %s Cursor Data: %s\n", position_location, SnippetData(cur_data, 80), SnippetData(cur_record_data, 80)) return cur_label, cur_data, cur_record_data } else if cur_data["type"] != nil || cur_data["variadic"] != nil || cur_data["rowdict"] != nil { return cur_label, nil, nil } } // No data at this location, or we would have returned it already fmt.Printf("DddGetNode: No result, returning nil: %v\n", cur_parts) return "nil", nil, nil } func GetDddNodeSummary(cur_label string, cur_data map[string]interface{}) string { // This is our result, setting to unknown, which should never be displayed summary := "Unknown: FIX" if cur_data["keydict"] != nil { keys := MapKeys(cur_data["keydict"].(map[string]interface{})) summary = fmt.Sprintf("%s: KeyDict: %v", cur_label, strings.Join(keys, ", ")) } else if cur_data["rowdict"] != nil { keys := MapKeys(cur_data["rowdict"].(map[string]interface{})["switch_rows"].(map[string]interface{})) summary = fmt.Sprintf("%s: RowDict: Rows: %d: %v", cur_label, len(cur_data["rowdict"].(map[string]interface{})), strings.Join(keys, ", ")) } else if cur_data["list"] != nil { cur_list := cur_data["list"].([]interface{}) item_summary := make([]string, 0) for _, item := range cur_data["list"].([]interface{}) { item_summary = append(item_summary, GetDddNodeSummary("", item.(map[string]interface{}))) } item_summary_str := strings.Join(item_summary, ", ") summary = fmt.Sprintf("%s: List (%d): %s", cur_label, len(cur_list), item_summary_str) } else if cur_data["type"] != nil { summary = fmt.Sprintf("%s: Data Item: Type: %s", cur_label, cur_data["type"]) } else if cur_data["variadic"] != nil { summary = fmt.Sprintf("%s: Variadic", cur_label) } // Crop long summaries if len(summary) > 60 { summary = summary[0:60] } return summary } func GetFieldMapFromSpec(data map[string]interface{}, label string, name string) map[string]interface{} { field_map := make(map[string]interface{}) if data["type"] == "string" || data["type"] == "int" || data["type"] == "boolean" { icon := "icon-make-group" if data["icon"] != nil { icon = data["icon"].(string) } size := 12 if data["size"] != nil { size = int(data["size"].(float64)) } field_map = map[string]interface{}{ "color": "primary", "icon": icon, "info": "", "label": label, "name": name, "placeholder": "", "size": size, "type": "text", "value": "", } } return field_map } func DddRenderNode(position_location string, ddd_id int64, temp_id int64, ddd_label string, ddd_node map[string]interface{}, ddd_cursor_data interface{}) []interface{} { rows := make([]interface{}, 0) //// Add the current row, so we work with them //cur_row := make([]interface{}, 0) //rows = append(rows, cur_row) if ddd_node["type"] != nil { field_name := 
fmt.Sprintf("ddd_node_%s", position_location) new_html_field := GetFieldMapFromSpec(ddd_node, ddd_label, field_name) rows = AppendArray(rows, new_html_field) } else if ddd_node["keydict"] != nil { html_element_name := fmt.Sprintf("ddd_node_%s", position_location) // Keydict select fields, navs to them, so we dont have to button nav new_html_field := map[string]interface{}{ "color": "primary", "icon": "icon-make-group", "info": "",
"label": ddd_label, "name": html_element_name,
random_line_split
ddd.go
"nil", nil, nil } selected_key := keys[cur_pos] fmt.Printf("DddGetNodeCurrent: keydict: Selected Key: %s\n", selected_key) result_cur_data := cur_data["keydict"].(map[string]interface{})[selected_key].(map[string]interface{}) cur_record_data_map := GetResult(cur_record_data, type_map).(map[string]interface{}) result_cur_record_data := make(map[string]interface{}) if cur_record_data_map[selected_key] != nil { result_cur_record_data = GetResult(cur_record_data_map[selected_key], type_map).(map[string]interface{}) } return fmt.Sprintf("Key: %s", selected_key), result_cur_data, result_cur_record_data } else if cur_data["rowdict"] != nil { // The rowdict is inside a list, but must be further selected based on the selection field, which will determine the node //TODO(g): ... return "RowDict", cur_data, cur_record_data } else if cur_data["list"] != nil { fmt.Printf("DDDGET:LIST: %T\n", cur_data["list"]) cur_data_list := cur_data["list"].([]interface{}) // Using the cur_pos as the index offset, this works up until the "variadic" node (if present) if cur_pos >= 0 && cur_pos < len(cur_data_list) { result_cur_data := cur_data_list[cur_pos].(map[string]interface{}) var result_cur_record_data interface{} cur_record_data_array := GetResult(cur_record_data, type_array).([]interface{}) if len(cur_record_data_array) > cur_pos { result_cur_record_data = cur_record_data_array[cur_pos] } else { result_cur_record_data = nil } return fmt.Sprintf("Index: %d", cur_pos), result_cur_data, result_cur_record_data } else { return "nil", nil, nil } } else if cur_data["type"] != nil { // This is a raw data node, and should not have any indexing, only "0" for it's location position if cur_pos == 0 { return "TBD: Get Label", cur_data, cur_record_data } else { return "nil", nil, nil } } else if cur_data["variadic"] != nil { // I think I have to backtrack to a previous node then? Parent node? if cur_pos == 0 { return fmt.Sprintf("Variadic: %d", cur_pos), cur_data, cur_record_data } else { return "nil", nil, nil } } else { //TODO(g): Replace this panic with a non-fatal error... But the DDD is bad, so report it? //panic(fmt.Sprintf("Unknown DDD node: %v", cur_data)) return "nil", nil, nil } return "Unknown", cur_data, cur_record_data } func DddGetNode(position_location string, ddd_data map[string]interface{}, data_record interface{}, udn_data map[string]interface{}) (string, map[string]interface{}, interface{}) { cur_parts := strings.Split(position_location, ".") cur_label := "" fmt.Printf("DDD Get Node: Parts: %s: %v\n", position_location, cur_parts) // Current position starts from ddd_data, and then we navigate it, and return it when we find the node cur_data := ddd_data cur_record_data := data_record processed_parts := make([]int, 0) // The first "0" is always "0", and is the base cur_data, so let's pop it off if len(cur_parts) > 1 { // Add the part we just processed to our processed_parts slice to keep track of them cur_pos, _ := strconv.Atoi(cur_parts[0]) processed_parts = append(processed_parts, cur_pos) fmt.Printf("DddGetNode: Removing first part: %v\n", cur_parts) cur_parts = cur_parts[1:len(cur_parts)] fmt.Printf("DddGetNode: Removed first part: %v\n", cur_parts) } else { if position_location == "0" { // There are no other parts, so we have the data fmt.Printf("DddGetNode: First part is '0': %s\n", position_location) return "The Beginninging", cur_data, cur_record_data } else { // Asking for data which cannot exist. 
The first part can only be 0 fmt.Printf("DddGetNode: First part is only part, and isnt '0': %s\n", position_location) return "The Somethingelseinging", nil, nil } } // As long as we still have cur_parts, keep going. If we dont return in this block, we will have an empty result for len(cur_parts) > 0 { cur_pos, _ := strconv.Atoi(cur_parts[0]) fmt.Printf("DDD Move: Step: Parts: %v Current: %d Cur Node: %s Cursor Data: %s\n", cur_parts, cur_pos, SnippetData(cur_data, 80), SnippetData(cur_record_data, 80)) cur_label, cur_data, cur_record_data = _DddGetNodeCurrent(cur_data, cur_record_data, cur_pos, processed_parts, cur_parts) // Add the part we just processed to our processed_parts slice to keep track of them processed_parts = append(processed_parts, cur_pos) // Pop off the first element, so we keep going if len(cur_parts) > 1 { cur_parts = cur_parts[1:len(cur_parts)] } else { cur_parts = make([]string, 0) } // If we have nothing left to process, return the result if len(cur_parts) == 0 { fmt.Printf("DddGetNode: Result: %s: Node Data: %s Cursor Data: %s\n", position_location, SnippetData(cur_data, 80), SnippetData(cur_record_data, 80)) return cur_label, cur_data, cur_record_data } else if cur_data["type"] != nil || cur_data["variadic"] != nil || cur_data["rowdict"] != nil { return cur_label, nil, nil } } // No data at this location, or we would have returned it already fmt.Printf("DddGetNode: No result, returning nil: %v\n", cur_parts) return "nil", nil, nil } func GetDddNodeSummary(cur_label string, cur_data map[string]interface{}) string { // This is our result, setting to unknown, which should never be displayed summary := "Unknown: FIX" if cur_data["keydict"] != nil { keys := MapKeys(cur_data["keydict"].(map[string]interface{})) summary = fmt.Sprintf("%s: KeyDict: %v", cur_label, strings.Join(keys, ", ")) } else if cur_data["rowdict"] != nil { keys := MapKeys(cur_data["rowdict"].(map[string]interface{})["switch_rows"].(map[string]interface{})) summary = fmt.Sprintf("%s: RowDict: Rows: %d: %v", cur_label, len(cur_data["rowdict"].(map[string]interface{})), strings.Join(keys, ", ")) } else if cur_data["list"] != nil { cur_list := cur_data["list"].([]interface{}) item_summary := make([]string, 0) for _, item := range cur_data["list"].([]interface{}) { item_summary = append(item_summary, GetDddNodeSummary("", item.(map[string]interface{}))) } item_summary_str := strings.Join(item_summary, ", ") summary = fmt.Sprintf("%s: List (%d): %s", cur_label, len(cur_list), item_summary_str) } else if cur_data["type"] != nil { summary = fmt.Sprintf("%s: Data Item: Type: %s", cur_label, cur_data["type"]) } else if cur_data["variadic"] != nil { summary = fmt.Sprintf("%s: Variadic", cur_label) } // Crop long summaries if len(summary) > 60 { summary = summary[0:60] } return summary } func GetFieldMapFromSpec(data map[string]interface{}, label string, name string) map[string]interface{} { field_map := make(map[string]interface{}) if data["type"] == "string" || data["type"] == "int" || data["type"] == "boolean" { icon := "icon-make-group" if data["icon"] != nil { icon = data["icon"].(string) } size := 12 if data["size"] != nil { size = int(data["size"].(float64)) } field_map = map[string]interface{}{ "color": "primary", "icon": icon, "info": "", "label": label, "name": name, "placeholder": "", "size": size, "type": "text", "value": "", } } return field_map } func
DddRenderNode
identifier_name
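The middle of this row is the identifier DddRenderNode; the surrounding prefix shows DddGetNode walking a nested node tree with a dotted position string such as "0.1.0", where the leading "0" is always the root and each following part selects a child by index. A stripped-down sketch of that traversal over a toy node type (the real code also threads record data and labels, which is omitted here):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// node is a toy stand-in for a DDD node: a leaf value plus an ordered list of children.
type node struct {
	value    string
	children []node
}

// getNode resolves a dotted position like "0.1.0"; the first part must be "0" (the root).
func getNode(root node, position string) (node, error) {
	parts := strings.Split(position, ".")
	if parts[0] != "0" {
		return node{}, fmt.Errorf("position must start with 0: %q", position)
	}
	cur := root
	for _, p := range parts[1:] {
		idx, err := strconv.Atoi(p)
		if err != nil || idx < 0 || idx >= len(cur.children) {
			return node{}, fmt.Errorf("no node at %q", position)
		}
		cur = cur.children[idx]
	}
	return cur, nil
}

func main() {
	root := node{children: []node{
		{value: "first"},
		{children: []node{{value: "nested"}}},
	}}
	n, err := getNode(root, "0.1.0")
	fmt.Println(n.value, err) // nested <nil>
}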
sh_commands.py
> 0] def make_output_processor(buff): def process_output(line): print(line.rstrip()) buff.append(line) return process_output def device_lock_path(serialno): return "/tmp/device-lock-%s" % serialno def device_lock(serialno, timeout=3600): return filelock.FileLock(device_lock_path(serialno), timeout=timeout) def adb_devices(): serialnos = [] p = re.compile(r'(\w+)\s+device') for line in split_stdout(sh.adb("devices")): m = p.match(line) if m: serialnos.append(m.group(1)) return serialnos def adb_getprop_by_serialno(serialno): outputs = sh.adb("-s", serialno, "shell", "getprop") raw_props = split_stdout(outputs) props = {} p = re.compile(r'\[(.+)\]: \[(.+)\]') for raw_prop in raw_props: m = p.match(raw_prop) if m: props[m.group(1)] = m.group(2) return props def adb_supported_abis(serialno): props = adb_getprop_by_serialno(serialno) abilist_str = props["ro.product.cpu.abilist"] abis = [abi.strip() for abi in abilist_str.split(',')] return abis def file_checksum(fname): hash_func = hashlib.md5() with open(fname, "rb") as f: for chunk in iter(lambda: f.read(4096), b""): hash_func.update(chunk) return hash_func.hexdigest() def adb_push_file(src_file, dst_dir, serialno): src_checksum = file_checksum(src_file) dst_file = os.path.join(dst_dir, os.path.basename(src_file)) stdout_buff = [] sh.adb("-s", serialno, "shell", "md5sum", dst_file, _out=lambda line: stdout_buff.append(line)) dst_checksum = stdout_buff[0].split()[0] if src_checksum == dst_checksum: print("Equal checksum with %s and %s" % (src_file, dst_file)) else: print("Push %s to %s" % (src_file, dst_dir)) sh.adb("-s", serialno, "push", src_file, dst_dir) def adb_push(src_path, dst_dir, serialno): if os.path.isdir(src_path): for src_file in os.listdir(src_path): adb_push_file(os.path.join(src_path, src_file), dst_dir, serialno) else: adb_push_file(src_path, dst_dir, serialno) def get_soc_serialnos_map(): serialnos = adb_devices() soc_serialnos_map = {} for serialno in serialnos: props = adb_getprop_by_serialno(serialno) soc_serialnos_map.setdefault(props["ro.board.platform"], []) \ .append(serialno) return soc_serialnos_map def get_target_socs_serialnos(target_socs=None): soc_serialnos_map = get_soc_serialnos_map() serialnos = [] if target_socs is None: target_socs = soc_serialnos_map.keys() for target_soc in target_socs: serialnos.extend(soc_serialnos_map[target_soc]) return serialnos def download_file(configs, file_name, output_dir): file_path = output_dir + "/" + file_name url = configs[file_name] checksum = configs[file_name + "_md5_checksum"] if not os.path.exists(file_path) or file_checksum(file_path) != checksum: print("downloading %s..." % file_name) urllib.urlretrieve(url, file_path) if file_checksum(file_path) != checksum: print("file %s md5 checksum not match" % file_name) exit(1) return file_path def get_mace(configs, abis, output_dir, build_mace): if build_mace: sh.bash("tools/build_mace.sh", abis, os.path.abspath(output_dir), _fg=True) else: file_path = download_file(configs, "libmace.zip", output_dir) sh.unzip("-o", file_path, "-d", "third_party/mace") def get_tflite(configs, output_dir):
def bazel_build(target, abi="armeabi-v7a", frameworks=None): print("* Build %s with ABI %s" % (target, abi)) if abi == "host": bazel_args = ( "build", target, ) else: bazel_args = ( "build", target, "--config", "android", "--cpu=%s" % abi, "--action_env=ANDROID_NDK_HOME=%s" % os.environ["ANDROID_NDK_HOME"], ) for framework in frameworks: bazel_args += ("--define", "%s=true" % framework.lower()) sh.bazel( _fg=True, *bazel_args) print("Build done!\n") def bazel_target_to_bin(target): # change //aibench/a/b:c to bazel-bin/aibench/a/b/c prefix, bin_name = target.split(':') prefix = prefix.replace('//', '/') if prefix.startswith('/'): prefix = prefix[1:] host_bin_path = "bazel-bin/%s" % prefix return host_bin_path, bin_name def prepare_device_env(serialno, abi, device_bin_path, frameworks): # for snpe if "SNPE" in frameworks and abi == "armeabi-v7a": snpe_lib_path = \ "bazel-mobile-ai-bench/external/snpe/lib/arm-android-gcc4.9" adb_push("bazel-mobile-ai-bench/external/snpe/lib/dsp", device_bin_path, serialno) if snpe_lib_path: adb_push(snpe_lib_path, device_bin_path, serialno) libgnustl_path = os.environ["ANDROID_NDK_HOME"] + \ "/sources/cxx-stl/gnu-libstdc++/4.9/libs/%s/" \ "libgnustl_shared.so" % abi adb_push(libgnustl_path, device_bin_path, serialno) # for mace if "MACE" in frameworks and abi == "armeabi-v7a": adb_push("third_party/nnlib/libhexagon_controller.so", device_bin_path, serialno) # for tflite if "TFLITE" in frameworks: tflite_lib_path = "" if abi == "armeabi-v7a": tflite_lib_path = \ "third_party/tflite/tensorflow/contrib/lite/" + \ "lib/armeabi-v7a/libtensorflowLite.so" elif abi == "arm64-v8a": tflite_lib_path = \ "third_party/tflite/tensorflow/contrib/lite/" + \ "lib/arm64-v8a/libtensorflowLite.so" if tflite_lib_path: adb_push(tflite_lib_path, device_bin_path, serialno) def prepare_model_and_input(serialno, models_inputs, device_bin_path, output_dir): file_names = [f for f in models_inputs if not f.endswith("_md5_checksum")] for file_name in file_names: file_path = models_inputs[file_name] local_file_path = file_path if file_path.startswith("http"): local_file_path = \ download_file(models_inputs, file_name, output_dir) else: checksum = models_inputs[file_name + "_md5_checksum"] if file_checksum(local_file_path) != checksum: print("file %s md5 checksum not match" % file_name) exit(1) adb_push(local_file_path, device_bin_path, serialno) def prepare_all_model_and_input(serialno, configs, device_bin_path, output_dir, frameworks, build_mace): models_inputs = configs["models_and_inputs"] if "MACE" in frameworks: if build_mace: # mace model files are generated from source for model_file in os.listdir(output_dir): if model_file.endswith(".pb") or model_file.endswith(".data"): model_file_path = output_dir + '/' + model_file adb_push(model_file_path, device_bin_path, serialno) else: prepare_model_and_input(serialno, models_inputs["MACE"], device_bin_path, output_dir) if "SNPE" in frameworks: prepare_model_and_input(serialno, models_inputs["SNPE"], device_bin_path, output_dir) if "TFLITE" in frameworks: prepare_model_and_input(serialno, models_inputs["TFLITE"], device_bin_path, output_dir) # ncnn model files are generated from source if "NCNN" in frameworks: ncnn_model_path = "bazel-genfiles/external/ncnn/models/" adb_push(ncnn_model_path, device_bin_path, serialno) prepare_model_and_input(serialno, models_inputs["NCNN"], device_bin_path, output_dir) def adb_run(abi, serialno, configs, host_bin_path,
    file_path = download_file(configs, "tensorflow-1.9.0-rc1.zip", output_dir)
    sh.unzip("-o", file_path, "-d", "third_party/tflite")
identifier_body
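The identifier body filled in here is get_tflite, which relies on download_file: fetch only when the file is missing or its MD5 differs from the configured checksum, then verify the downloaded copy. A rough Go sketch of the same cache-and-verify idea (hypothetical helper names, not part of the benchmark tooling, written in Go only to match the other sketches in this document):

package fetch

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
	"net/http"
	"os"
)

// fileMD5 returns the hex MD5 of a local file, mirroring file_checksum above.
func fileMD5(path string) (string, error) {
	f, err := os.Open(path)
	if err != nil {
		return "", err
	}
	defer f.Close()
	h := md5.New()
	if _, err := io.Copy(h, f); err != nil {
		return "", err
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}

// downloadIfNeeded fetches url into path unless a copy with the expected
// checksum already exists, and verifies the result afterwards.
func downloadIfNeeded(url, path, wantMD5 string) error {
	if sum, err := fileMD5(path); err == nil && sum == wantMD5 {
		return nil // cached copy is already good
	}
	resp, err := http.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	out, err := os.Create(path)
	if err != nil {
		return err
	}
	if _, err := io.Copy(out, resp.Body); err != nil {
		out.Close()
		return err
	}
	out.Close()
	sum, err := fileMD5(path)
	if err != nil {
		return err
	}
	if sum != wantMD5 {
		return fmt.Errorf("%s: md5 %s does not match expected %s", path, sum, wantMD5)
	}
	return nil
}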
sh_commands.py
> 0] def make_output_processor(buff): def process_output(line): print(line.rstrip()) buff.append(line) return process_output def device_lock_path(serialno): return "/tmp/device-lock-%s" % serialno def device_lock(serialno, timeout=3600): return filelock.FileLock(device_lock_path(serialno), timeout=timeout) def adb_devices(): serialnos = [] p = re.compile(r'(\w+)\s+device') for line in split_stdout(sh.adb("devices")): m = p.match(line) if m: serialnos.append(m.group(1)) return serialnos def adb_getprop_by_serialno(serialno): outputs = sh.adb("-s", serialno, "shell", "getprop") raw_props = split_stdout(outputs) props = {} p = re.compile(r'\[(.+)\]: \[(.+)\]') for raw_prop in raw_props: m = p.match(raw_prop) if m: props[m.group(1)] = m.group(2) return props def adb_supported_abis(serialno): props = adb_getprop_by_serialno(serialno) abilist_str = props["ro.product.cpu.abilist"] abis = [abi.strip() for abi in abilist_str.split(',')] return abis def file_checksum(fname): hash_func = hashlib.md5() with open(fname, "rb") as f: for chunk in iter(lambda: f.read(4096), b""): hash_func.update(chunk) return hash_func.hexdigest() def
(src_file, dst_dir, serialno): src_checksum = file_checksum(src_file) dst_file = os.path.join(dst_dir, os.path.basename(src_file)) stdout_buff = [] sh.adb("-s", serialno, "shell", "md5sum", dst_file, _out=lambda line: stdout_buff.append(line)) dst_checksum = stdout_buff[0].split()[0] if src_checksum == dst_checksum: print("Equal checksum with %s and %s" % (src_file, dst_file)) else: print("Push %s to %s" % (src_file, dst_dir)) sh.adb("-s", serialno, "push", src_file, dst_dir) def adb_push(src_path, dst_dir, serialno): if os.path.isdir(src_path): for src_file in os.listdir(src_path): adb_push_file(os.path.join(src_path, src_file), dst_dir, serialno) else: adb_push_file(src_path, dst_dir, serialno) def get_soc_serialnos_map(): serialnos = adb_devices() soc_serialnos_map = {} for serialno in serialnos: props = adb_getprop_by_serialno(serialno) soc_serialnos_map.setdefault(props["ro.board.platform"], []) \ .append(serialno) return soc_serialnos_map def get_target_socs_serialnos(target_socs=None): soc_serialnos_map = get_soc_serialnos_map() serialnos = [] if target_socs is None: target_socs = soc_serialnos_map.keys() for target_soc in target_socs: serialnos.extend(soc_serialnos_map[target_soc]) return serialnos def download_file(configs, file_name, output_dir): file_path = output_dir + "/" + file_name url = configs[file_name] checksum = configs[file_name + "_md5_checksum"] if not os.path.exists(file_path) or file_checksum(file_path) != checksum: print("downloading %s..." % file_name) urllib.urlretrieve(url, file_path) if file_checksum(file_path) != checksum: print("file %s md5 checksum not match" % file_name) exit(1) return file_path def get_mace(configs, abis, output_dir, build_mace): if build_mace: sh.bash("tools/build_mace.sh", abis, os.path.abspath(output_dir), _fg=True) else: file_path = download_file(configs, "libmace.zip", output_dir) sh.unzip("-o", file_path, "-d", "third_party/mace") def get_tflite(configs, output_dir): file_path = download_file(configs, "tensorflow-1.9.0-rc1.zip", output_dir) sh.unzip("-o", file_path, "-d", "third_party/tflite") def bazel_build(target, abi="armeabi-v7a", frameworks=None): print("* Build %s with ABI %s" % (target, abi)) if abi == "host": bazel_args = ( "build", target, ) else: bazel_args = ( "build", target, "--config", "android", "--cpu=%s" % abi, "--action_env=ANDROID_NDK_HOME=%s" % os.environ["ANDROID_NDK_HOME"], ) for framework in frameworks: bazel_args += ("--define", "%s=true" % framework.lower()) sh.bazel( _fg=True, *bazel_args) print("Build done!\n") def bazel_target_to_bin(target): # change //aibench/a/b:c to bazel-bin/aibench/a/b/c prefix, bin_name = target.split(':') prefix = prefix.replace('//', '/') if prefix.startswith('/'): prefix = prefix[1:] host_bin_path = "bazel-bin/%s" % prefix return host_bin_path, bin_name def prepare_device_env(serialno, abi, device_bin_path, frameworks): # for snpe if "SNPE" in frameworks and abi == "armeabi-v7a": snpe_lib_path = \ "bazel-mobile-ai-bench/external/snpe/lib/arm-android-gcc4.9" adb_push("bazel-mobile-ai-bench/external/snpe/lib/dsp", device_bin_path, serialno) if snpe_lib_path: adb_push(snpe_lib_path, device_bin_path, serialno) libgnustl_path = os.environ["ANDROID_NDK_HOME"] + \ "/sources/cxx-stl/gnu-libstdc++/4.9/libs/%s/" \ "libgnustl_shared.so" % abi adb_push(libgnustl_path, device_bin_path, serialno) # for mace if "MACE" in frameworks and abi == "armeabi-v7a": adb_push("third_party/nnlib/libhexagon_controller.so", device_bin_path, serialno) # for tflite if "TFLITE" in frameworks: tflite_lib_path 
= "" if abi == "armeabi-v7a": tflite_lib_path = \ "third_party/tflite/tensorflow/contrib/lite/" + \ "lib/armeabi-v7a/libtensorflowLite.so" elif abi == "arm64-v8a": tflite_lib_path = \ "third_party/tflite/tensorflow/contrib/lite/" + \ "lib/arm64-v8a/libtensorflowLite.so" if tflite_lib_path: adb_push(tflite_lib_path, device_bin_path, serialno) def prepare_model_and_input(serialno, models_inputs, device_bin_path, output_dir): file_names = [f for f in models_inputs if not f.endswith("_md5_checksum")] for file_name in file_names: file_path = models_inputs[file_name] local_file_path = file_path if file_path.startswith("http"): local_file_path = \ download_file(models_inputs, file_name, output_dir) else: checksum = models_inputs[file_name + "_md5_checksum"] if file_checksum(local_file_path) != checksum: print("file %s md5 checksum not match" % file_name) exit(1) adb_push(local_file_path, device_bin_path, serialno) def prepare_all_model_and_input(serialno, configs, device_bin_path, output_dir, frameworks, build_mace): models_inputs = configs["models_and_inputs"] if "MACE" in frameworks: if build_mace: # mace model files are generated from source for model_file in os.listdir(output_dir): if model_file.endswith(".pb") or model_file.endswith(".data"): model_file_path = output_dir + '/' + model_file adb_push(model_file_path, device_bin_path, serialno) else: prepare_model_and_input(serialno, models_inputs["MACE"], device_bin_path, output_dir) if "SNPE" in frameworks: prepare_model_and_input(serialno, models_inputs["SNPE"], device_bin_path, output_dir) if "TFLITE" in frameworks: prepare_model_and_input(serialno, models_inputs["TFLITE"], device_bin_path, output_dir) # ncnn model files are generated from source if "NCNN" in frameworks: ncnn_model_path = "bazel-genfiles/external/ncnn/models/" adb_push(ncnn_model_path, device_bin_path, serialno) prepare_model_and_input(serialno, models_inputs["NCNN"], device_bin_path, output_dir) def adb_run(abi, serialno, configs, host_bin_path,
adb_push_file
identifier_name
sh_commands.py
()) > 0] def make_output_processor(buff): def process_output(line): print(line.rstrip()) buff.append(line) return process_output def device_lock_path(serialno): return "/tmp/device-lock-%s" % serialno def device_lock(serialno, timeout=3600): return filelock.FileLock(device_lock_path(serialno), timeout=timeout) def adb_devices(): serialnos = [] p = re.compile(r'(\w+)\s+device') for line in split_stdout(sh.adb("devices")): m = p.match(line) if m: serialnos.append(m.group(1)) return serialnos def adb_getprop_by_serialno(serialno): outputs = sh.adb("-s", serialno, "shell", "getprop") raw_props = split_stdout(outputs) props = {} p = re.compile(r'\[(.+)\]: \[(.+)\]') for raw_prop in raw_props: m = p.match(raw_prop) if m: props[m.group(1)] = m.group(2) return props def adb_supported_abis(serialno):
abis = [abi.strip() for abi in abilist_str.split(',')] return abis def file_checksum(fname): hash_func = hashlib.md5() with open(fname, "rb") as f: for chunk in iter(lambda: f.read(4096), b""): hash_func.update(chunk) return hash_func.hexdigest() def adb_push_file(src_file, dst_dir, serialno): src_checksum = file_checksum(src_file) dst_file = os.path.join(dst_dir, os.path.basename(src_file)) stdout_buff = [] sh.adb("-s", serialno, "shell", "md5sum", dst_file, _out=lambda line: stdout_buff.append(line)) dst_checksum = stdout_buff[0].split()[0] if src_checksum == dst_checksum: print("Equal checksum with %s and %s" % (src_file, dst_file)) else: print("Push %s to %s" % (src_file, dst_dir)) sh.adb("-s", serialno, "push", src_file, dst_dir) def adb_push(src_path, dst_dir, serialno): if os.path.isdir(src_path): for src_file in os.listdir(src_path): adb_push_file(os.path.join(src_path, src_file), dst_dir, serialno) else: adb_push_file(src_path, dst_dir, serialno) def get_soc_serialnos_map(): serialnos = adb_devices() soc_serialnos_map = {} for serialno in serialnos: props = adb_getprop_by_serialno(serialno) soc_serialnos_map.setdefault(props["ro.board.platform"], []) \ .append(serialno) return soc_serialnos_map def get_target_socs_serialnos(target_socs=None): soc_serialnos_map = get_soc_serialnos_map() serialnos = [] if target_socs is None: target_socs = soc_serialnos_map.keys() for target_soc in target_socs: serialnos.extend(soc_serialnos_map[target_soc]) return serialnos def download_file(configs, file_name, output_dir): file_path = output_dir + "/" + file_name url = configs[file_name] checksum = configs[file_name + "_md5_checksum"] if not os.path.exists(file_path) or file_checksum(file_path) != checksum: print("downloading %s..." % file_name) urllib.urlretrieve(url, file_path) if file_checksum(file_path) != checksum: print("file %s md5 checksum not match" % file_name) exit(1) return file_path def get_mace(configs, abis, output_dir, build_mace): if build_mace: sh.bash("tools/build_mace.sh", abis, os.path.abspath(output_dir), _fg=True) else: file_path = download_file(configs, "libmace.zip", output_dir) sh.unzip("-o", file_path, "-d", "third_party/mace") def get_tflite(configs, output_dir): file_path = download_file(configs, "tensorflow-1.9.0-rc1.zip", output_dir) sh.unzip("-o", file_path, "-d", "third_party/tflite") def bazel_build(target, abi="armeabi-v7a", frameworks=None): print("* Build %s with ABI %s" % (target, abi)) if abi == "host": bazel_args = ( "build", target, ) else: bazel_args = ( "build", target, "--config", "android", "--cpu=%s" % abi, "--action_env=ANDROID_NDK_HOME=%s" % os.environ["ANDROID_NDK_HOME"], ) for framework in frameworks: bazel_args += ("--define", "%s=true" % framework.lower()) sh.bazel( _fg=True, *bazel_args) print("Build done!\n") def bazel_target_to_bin(target): # change //aibench/a/b:c to bazel-bin/aibench/a/b/c prefix, bin_name = target.split(':') prefix = prefix.replace('//', '/') if prefix.startswith('/'): prefix = prefix[1:] host_bin_path = "bazel-bin/%s" % prefix return host_bin_path, bin_name def prepare_device_env(serialno, abi, device_bin_path, frameworks): # for snpe if "SNPE" in frameworks and abi == "armeabi-v7a": snpe_lib_path = \ "bazel-mobile-ai-bench/external/snpe/lib/arm-android-gcc4.9" adb_push("bazel-mobile-ai-bench/external/snpe/lib/dsp", device_bin_path, serialno) if snpe_lib_path: adb_push(snpe_lib_path, device_bin_path, serialno) libgnustl_path = os.environ["ANDROID_NDK_HOME"] + \ "/sources/cxx-stl/gnu-libstdc++/4.9/libs/%s/" \ 
"libgnustl_shared.so" % abi adb_push(libgnustl_path, device_bin_path, serialno) # for mace if "MACE" in frameworks and abi == "armeabi-v7a": adb_push("third_party/nnlib/libhexagon_controller.so", device_bin_path, serialno) # for tflite if "TFLITE" in frameworks: tflite_lib_path = "" if abi == "armeabi-v7a": tflite_lib_path = \ "third_party/tflite/tensorflow/contrib/lite/" + \ "lib/armeabi-v7a/libtensorflowLite.so" elif abi == "arm64-v8a": tflite_lib_path = \ "third_party/tflite/tensorflow/contrib/lite/" + \ "lib/arm64-v8a/libtensorflowLite.so" if tflite_lib_path: adb_push(tflite_lib_path, device_bin_path, serialno) def prepare_model_and_input(serialno, models_inputs, device_bin_path, output_dir): file_names = [f for f in models_inputs if not f.endswith("_md5_checksum")] for file_name in file_names: file_path = models_inputs[file_name] local_file_path = file_path if file_path.startswith("http"): local_file_path = \ download_file(models_inputs, file_name, output_dir) else: checksum = models_inputs[file_name + "_md5_checksum"] if file_checksum(local_file_path) != checksum: print("file %s md5 checksum not match" % file_name) exit(1) adb_push(local_file_path, device_bin_path, serialno) def prepare_all_model_and_input(serialno, configs, device_bin_path, output_dir, frameworks, build_mace): models_inputs = configs["models_and_inputs"] if "MACE" in frameworks: if build_mace: # mace model files are generated from source for model_file in os.listdir(output_dir): if model_file.endswith(".pb") or model_file.endswith(".data"): model_file_path = output_dir + '/' + model_file adb_push(model_file_path, device_bin_path, serialno) else: prepare_model_and_input(serialno, models_inputs["MACE"], device_bin_path, output_dir) if "SNPE" in frameworks: prepare_model_and_input(serialno, models_inputs["SNPE"], device_bin_path, output_dir) if "TFLITE" in frameworks: prepare_model_and_input(serialno, models_inputs["TFLITE"], device_bin_path, output_dir) # ncnn model files are generated from source if "NCNN" in frameworks: ncnn_model_path = "bazel-genfiles/external/ncnn/models/" adb_push(ncnn_model_path, device_bin_path, serialno) prepare_model_and_input(serialno, models_inputs["NCNN"], device_bin_path, output_dir) def adb_run(abi, serialno, configs, host_bin_path,
    props = adb_getprop_by_serialno(serialno)
    abilist_str = props["ro.product.cpu.abilist"]
random_line_split
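The two lines restored in this row's middle come from adb_supported_abis, which reads the property map produced by adb_getprop_by_serialno; that helper matches lines of the form [key]: [value] with a regular expression. The same parsing step as a small Go sketch, with sample output inlined instead of shelling out to adb:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

var propLine = regexp.MustCompile(`\[(.+)\]: \[(.+)\]`)

// parseGetprop extracts key/value pairs from `adb shell getprop` style output.
func parseGetprop(output string) map[string]string {
	props := map[string]string{}
	for _, line := range strings.Split(output, "\n") {
		if m := propLine.FindStringSubmatch(line); m != nil {
			props[m[1]] = m[2]
		}
	}
	return props
}

func main() {
	out := "[ro.board.platform]: [sdm845]\n[ro.product.cpu.abilist]: [arm64-v8a,armeabi-v7a]"
	props := parseGetprop(out)
	fmt.Println(props["ro.board.platform"])      // sdm845
	fmt.Println(props["ro.product.cpu.abilist"]) // arm64-v8a,armeabi-v7a
}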
sh_commands.py
nos(target_socs=None): soc_serialnos_map = get_soc_serialnos_map() serialnos = [] if target_socs is None: target_socs = soc_serialnos_map.keys() for target_soc in target_socs: serialnos.extend(soc_serialnos_map[target_soc]) return serialnos def download_file(configs, file_name, output_dir): file_path = output_dir + "/" + file_name url = configs[file_name] checksum = configs[file_name + "_md5_checksum"] if not os.path.exists(file_path) or file_checksum(file_path) != checksum: print("downloading %s..." % file_name) urllib.urlretrieve(url, file_path) if file_checksum(file_path) != checksum: print("file %s md5 checksum not match" % file_name) exit(1) return file_path def get_mace(configs, abis, output_dir, build_mace): if build_mace: sh.bash("tools/build_mace.sh", abis, os.path.abspath(output_dir), _fg=True) else: file_path = download_file(configs, "libmace.zip", output_dir) sh.unzip("-o", file_path, "-d", "third_party/mace") def get_tflite(configs, output_dir): file_path = download_file(configs, "tensorflow-1.9.0-rc1.zip", output_dir) sh.unzip("-o", file_path, "-d", "third_party/tflite") def bazel_build(target, abi="armeabi-v7a", frameworks=None): print("* Build %s with ABI %s" % (target, abi)) if abi == "host": bazel_args = ( "build", target, ) else: bazel_args = ( "build", target, "--config", "android", "--cpu=%s" % abi, "--action_env=ANDROID_NDK_HOME=%s" % os.environ["ANDROID_NDK_HOME"], ) for framework in frameworks: bazel_args += ("--define", "%s=true" % framework.lower()) sh.bazel( _fg=True, *bazel_args) print("Build done!\n") def bazel_target_to_bin(target): # change //aibench/a/b:c to bazel-bin/aibench/a/b/c prefix, bin_name = target.split(':') prefix = prefix.replace('//', '/') if prefix.startswith('/'): prefix = prefix[1:] host_bin_path = "bazel-bin/%s" % prefix return host_bin_path, bin_name def prepare_device_env(serialno, abi, device_bin_path, frameworks): # for snpe if "SNPE" in frameworks and abi == "armeabi-v7a": snpe_lib_path = \ "bazel-mobile-ai-bench/external/snpe/lib/arm-android-gcc4.9" adb_push("bazel-mobile-ai-bench/external/snpe/lib/dsp", device_bin_path, serialno) if snpe_lib_path: adb_push(snpe_lib_path, device_bin_path, serialno) libgnustl_path = os.environ["ANDROID_NDK_HOME"] + \ "/sources/cxx-stl/gnu-libstdc++/4.9/libs/%s/" \ "libgnustl_shared.so" % abi adb_push(libgnustl_path, device_bin_path, serialno) # for mace if "MACE" in frameworks and abi == "armeabi-v7a": adb_push("third_party/nnlib/libhexagon_controller.so", device_bin_path, serialno) # for tflite if "TFLITE" in frameworks: tflite_lib_path = "" if abi == "armeabi-v7a": tflite_lib_path = \ "third_party/tflite/tensorflow/contrib/lite/" + \ "lib/armeabi-v7a/libtensorflowLite.so" elif abi == "arm64-v8a": tflite_lib_path = \ "third_party/tflite/tensorflow/contrib/lite/" + \ "lib/arm64-v8a/libtensorflowLite.so" if tflite_lib_path: adb_push(tflite_lib_path, device_bin_path, serialno) def prepare_model_and_input(serialno, models_inputs, device_bin_path, output_dir): file_names = [f for f in models_inputs if not f.endswith("_md5_checksum")] for file_name in file_names: file_path = models_inputs[file_name] local_file_path = file_path if file_path.startswith("http"): local_file_path = \ download_file(models_inputs, file_name, output_dir) else: checksum = models_inputs[file_name + "_md5_checksum"] if file_checksum(local_file_path) != checksum: print("file %s md5 checksum not match" % file_name) exit(1) adb_push(local_file_path, device_bin_path, serialno) def prepare_all_model_and_input(serialno, configs, 
device_bin_path, output_dir, frameworks, build_mace): models_inputs = configs["models_and_inputs"] if "MACE" in frameworks: if build_mace: # mace model files are generated from source for model_file in os.listdir(output_dir): if model_file.endswith(".pb") or model_file.endswith(".data"): model_file_path = output_dir + '/' + model_file adb_push(model_file_path, device_bin_path, serialno) else: prepare_model_and_input(serialno, models_inputs["MACE"], device_bin_path, output_dir) if "SNPE" in frameworks: prepare_model_and_input(serialno, models_inputs["SNPE"], device_bin_path, output_dir) if "TFLITE" in frameworks: prepare_model_and_input(serialno, models_inputs["TFLITE"], device_bin_path, output_dir) # ncnn model files are generated from source if "NCNN" in frameworks: ncnn_model_path = "bazel-genfiles/external/ncnn/models/" adb_push(ncnn_model_path, device_bin_path, serialno) prepare_model_and_input(serialno, models_inputs["NCNN"], device_bin_path, output_dir) def adb_run(abi, serialno, configs, host_bin_path, bin_name, run_interval, num_threads, build_mace, frameworks=None, model_names=None, runtimes=None, device_bin_path="/data/local/tmp/aibench", output_dir="output", ): host_bin_full_path = "%s/%s" % (host_bin_path, bin_name) device_bin_full_path = "%s/%s" % (device_bin_path, bin_name) props = adb_getprop_by_serialno(serialno) print( "=====================================================================" ) print("Trying to lock device %s" % serialno) with device_lock(serialno): print("Run on device: %s, %s, %s" % (serialno, props["ro.board.platform"], props["ro.product.model"])) try: sh.bash("tools/power.sh", serialno, props["ro.board.platform"], _fg=True) except Exception, e: print("Config power exception %s" % str(e)) sh.adb("-s", serialno, "shell", "mkdir -p %s" % device_bin_path) sh.adb("-s", serialno, "shell", "rm -rf %s" % os.path.join(device_bin_path, "interior")) sh.adb("-s", serialno, "shell", "mkdir %s" % os.path.join(device_bin_path, "interior")) prepare_device_env(serialno, abi, device_bin_path, frameworks) prepare_all_model_and_input(serialno, configs, device_bin_path, output_dir, frameworks, build_mace) adb_push(host_bin_full_path, device_bin_path, serialno) print("Run %s" % device_bin_full_path) stdout_buff = [] process_output = make_output_processor(stdout_buff) cmd = "cd %s; ADSP_LIBRARY_PATH='.;/system/lib/rfsa/adsp;/system" \ "/vendor/lib/rfsa/adsp;/dsp'; LD_LIBRARY_PATH=. " \ "./model_benchmark" % device_bin_path if frameworks == ['all']: frameworks = FRAMEWORKS if runtimes == ['all']: runtimes = RUNTIMES if model_names == ['all']: model_names = BENCHMARK_MODELS for runtime in runtimes: for framework in frameworks:
                for model_name in model_names:
                    print(framework, runtime, model_name)
                    args = "--run_interval=%d --num_threads=%d " \
                           "--framework=%s --runtime=%s --model_name=%s " \
                           "--product_soc=%s.%s" % \
                           (run_interval, num_threads, framework, runtime,
                            model_name,
                            props["ro.product.model"].replace(" ", ""),
                            props["ro.board.platform"])
                    sh.adb(
                        "-s",
                        serialno,
                        "shell",
                        "%s %s" % (cmd, args),
                        _tty_in=True,
                        _out=process_output,
                        _err_to_out=True)
conditional_block
auth_handler.go
.Method != http.MethodGet { // https://openid.net/specs/openid-connect-core-1_0.html#AuthRequest // Authorization Servers MUST support the use of the HTTP GET and POST methods defined in // RFC 2616 [RFC2616] at the Authorization Endpoint. return httperr.Newf(http.StatusMethodNotAllowed, "%s (try GET or POST)", r.Method) } oidcUpstream, ldapUpstream, err := chooseUpstreamIDP(idpLister) if err != nil { plog.WarningErr("authorize upstream config", err) return err } if oidcUpstream != nil { return handleAuthRequestForOIDCUpstream(r, w, oauthHelperWithoutStorage, generateCSRF, generateNonce, generatePKCE, oidcUpstream, downstreamIssuer, upstreamStateEncoder, cookieCodec, ) } return handleAuthRequestForLDAPUpstream(r, w, oauthHelperWithStorage, ldapUpstream, ) })) } func handleAuthRequestForLDAPUpstream( r *http.Request, w http.ResponseWriter, oauthHelper fosite.OAuth2Provider, ldapUpstream provider.UpstreamLDAPIdentityProviderI, ) error { authorizeRequester, created := newAuthorizeRequest(r, w, oauthHelper) if !created { return nil } username := r.Header.Get(CustomUsernameHeaderName) password := r.Header.Get(CustomPasswordHeaderName) if username == "" || password == "" { // Return an error according to OIDC spec 3.1.2.6 (second paragraph). err := errors.WithStack(fosite.ErrAccessDenied.WithHintf("Missing or blank username or password.")) plog.Info("authorize response error", oidc.FositeErrorForLog(err)...) oauthHelper.WriteAuthorizeError(w, authorizeRequester, err) return nil } authenticateResponse, authenticated, err := ldapUpstream.AuthenticateUser(r.Context(), username, password) if err != nil { plog.WarningErr("unexpected error during upstream LDAP authentication", err, "upstreamName", ldapUpstream.GetName()) return httperr.New(http.StatusBadGateway, "unexpected error during upstream authentication") } if !authenticated { plog.Debug("failed upstream LDAP authentication", "upstreamName", ldapUpstream.GetName()) // Return an error according to OIDC spec 3.1.2.6 (second paragraph). err = errors.WithStack(fosite.ErrAccessDenied.WithHintf("Username/password not accepted by LDAP provider.")) plog.Info("authorize response error", oidc.FositeErrorForLog(err)...) oauthHelper.WriteAuthorizeError(w, authorizeRequester, err) return nil } openIDSession := downstreamsession.MakeDownstreamSession( downstreamSubjectFromUpstreamLDAP(ldapUpstream, authenticateResponse), authenticateResponse.User.GetName(), authenticateResponse.User.GetGroups(), ) authorizeResponder, err := oauthHelper.NewAuthorizeResponse(r.Context(), authorizeRequester, openIDSession) if err != nil { plog.Info("authorize response error", oidc.FositeErrorForLog(err)...) 
oauthHelper.WriteAuthorizeError(w, authorizeRequester, err) return nil } oauthHelper.WriteAuthorizeResponse(w, authorizeRequester, authorizeResponder) return nil } func handleAuthRequestForOIDCUpstream( r *http.Request, w http.ResponseWriter, oauthHelper fosite.OAuth2Provider, generateCSRF func() (csrftoken.CSRFToken, error), generateNonce func() (nonce.Nonce, error), generatePKCE func() (pkce.Code, error), oidcUpstream provider.UpstreamOIDCIdentityProviderI, downstreamIssuer string, upstreamStateEncoder oidc.Encoder, cookieCodec oidc.Codec, ) error { authorizeRequester, created := newAuthorizeRequest(r, w, oauthHelper) if !created { return nil } now := time.Now() _, err := oauthHelper.NewAuthorizeResponse(r.Context(), authorizeRequester, &openid.DefaultSession{ Claims: &jwt.IDTokenClaims{ // Temporary claim values to allow `NewAuthorizeResponse` to perform other OIDC validations. Subject: "none", AuthTime: now, RequestedAt: now, }, }) if err != nil { plog.Info("authorize response error", oidc.FositeErrorForLog(err)...) oauthHelper.WriteAuthorizeError(w, authorizeRequester, err) return nil } csrfValue, nonceValue, pkceValue, err := generateValues(generateCSRF, generateNonce, generatePKCE) if err != nil { plog.Error("authorize generate error", err) return err } csrfFromCookie := readCSRFCookie(r, cookieCodec) if csrfFromCookie != "" { csrfValue = csrfFromCookie } upstreamOAuthConfig := oauth2.Config{ ClientID: oidcUpstream.GetClientID(), Endpoint: oauth2.Endpoint{ AuthURL: oidcUpstream.GetAuthorizationURL().String(), }, RedirectURL: fmt.Sprintf("%s/callback", downstreamIssuer), Scopes: oidcUpstream.GetScopes(), } encodedStateParamValue, err := upstreamStateParam( authorizeRequester, oidcUpstream.GetName(), nonceValue, csrfValue, pkceValue, upstreamStateEncoder, ) if err != nil { plog.Error("authorize upstream state param error", err) return err } if csrfFromCookie == "" { // We did not receive an incoming CSRF cookie, so write a new one. err := addCSRFSetCookieHeader(w, csrfValue, cookieCodec) if err != nil { plog.Error("error setting CSRF cookie", err) return err } } authCodeOptions := []oauth2.AuthCodeOption{ oauth2.AccessTypeOffline, nonceValue.Param(), pkceValue.Challenge(), pkceValue.Method(), } promptParam := r.Form.Get("prompt") if promptParam != "" && oidc.ScopeWasRequested(authorizeRequester, coreosoidc.ScopeOpenID) { authCodeOptions = append(authCodeOptions, oauth2.SetAuthURLParam("prompt", promptParam)) } http.Redirect(w, r, upstreamOAuthConfig.AuthCodeURL( encodedStateParamValue, authCodeOptions..., ), 302, ) return nil } func newAuthorizeRequest(r *http.Request, w http.ResponseWriter, oauthHelper fosite.OAuth2Provider) (fosite.AuthorizeRequester, bool) { authorizeRequester, err := oauthHelper.NewAuthorizeRequest(r.Context(), r) if err != nil { plog.Info("authorize request error", oidc.FositeErrorForLog(err)...) oauthHelper.WriteAuthorizeError(w, authorizeRequester, err) return nil, false } // Automatically grant the openid, offline_access, and pinniped:request-audience scopes, but only if they were requested. // Grant the openid scope (for now) if they asked for it so that `NewAuthorizeResponse` will perform its OIDC validations. // There don't seem to be any validations inside `NewAuthorizeResponse` related to the offline_access scope // at this time, however we will temporarily grant the scope just in case that changes in a future release of fosite. 
downstreamsession.GrantScopesIfRequested(authorizeRequester) return authorizeRequester, true } func readCSRFCookie(r *http.Request, codec oidc.Decoder) csrftoken.CSRFToken { receivedCSRFCookie, err := r.Cookie(oidc.CSRFCookieName) if err != nil { // Error means that the cookie was not found return "" } var csrfFromCookie csrftoken.CSRFToken err = codec.Decode(oidc.CSRFCookieEncodingName, receivedCSRFCookie.Value, &csrfFromCookie) if err != nil { // We can ignore any errors and just make a new cookie. Hopefully this will // make the user experience better if, for example, the server rotated // cookie signing keys and then a user submitted a very old cookie. return "" } return csrfFromCookie } // Select either an OIDC or an LDAP IDP, or return an error. func chooseUpstreamIDP(idpLister oidc.UpstreamIdentityProvidersLister) (provider.UpstreamOIDCIdentityProviderI, provider.UpstreamLDAPIdentityProviderI, error) { oidcUpstreams := idpLister.GetOIDCIdentityProviders() ldapUpstreams := idpLister.GetLDAPIdentityProviders() switch { case len(oidcUpstreams)+len(ldapUpstreams) == 0: return nil, nil, httperr.New( http.StatusUnprocessableEntity, "No upstream providers are configured", ) case len(oidcUpstreams)+len(ldapUpstreams) > 1: var upstreamIDPNames []string for _, idp := range oidcUpstreams
{
	upstreamIDPNames = append(upstreamIDPNames, idp.GetName())
}
conditional_block
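The conditional block above sits inside chooseUpstreamIDP, which collects the names of all configured OIDC and LDAP upstreams so it can refuse the request when there are zero or more than one. A condensed sketch of that zero/one/many rule over plain name slices (the real function returns the provider interfaces, not strings):

package idpselect

import "fmt"

// chooseOne returns the single configured provider name, or an error when
// none or several are configured, mirroring the checks in chooseUpstreamIDP.
func chooseOne(oidcNames, ldapNames []string) (string, error) {
	total := len(oidcNames) + len(ldapNames)
	switch {
	case total == 0:
		return "", fmt.Errorf("no upstream providers are configured")
	case total > 1:
		all := append(append([]string{}, oidcNames...), ldapNames...)
		return "", fmt.Errorf("only one upstream provider may be configured, found: %v", all)
	case len(oidcNames) == 1:
		return oidcNames[0], nil
	default:
		return ldapNames[0], nil
	}
}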
auth_handler.go
credential ) func NewHandler( downstreamIssuer string, idpLister oidc.UpstreamIdentityProvidersLister, oauthHelperWithoutStorage fosite.OAuth2Provider, oauthHelperWithStorage fosite.OAuth2Provider, generateCSRF func() (csrftoken.CSRFToken, error), generatePKCE func() (pkce.Code, error), generateNonce func() (nonce.Nonce, error), upstreamStateEncoder oidc.Encoder, cookieCodec oidc.Codec, ) http.Handler { return securityheader.Wrap(httperr.HandlerFunc(func(w http.ResponseWriter, r *http.Request) error { if r.Method != http.MethodPost && r.Method != http.MethodGet { // https://openid.net/specs/openid-connect-core-1_0.html#AuthRequest // Authorization Servers MUST support the use of the HTTP GET and POST methods defined in // RFC 2616 [RFC2616] at the Authorization Endpoint. return httperr.Newf(http.StatusMethodNotAllowed, "%s (try GET or POST)", r.Method) } oidcUpstream, ldapUpstream, err := chooseUpstreamIDP(idpLister) if err != nil { plog.WarningErr("authorize upstream config", err) return err } if oidcUpstream != nil { return handleAuthRequestForOIDCUpstream(r, w, oauthHelperWithoutStorage, generateCSRF, generateNonce, generatePKCE, oidcUpstream, downstreamIssuer, upstreamStateEncoder, cookieCodec, ) } return handleAuthRequestForLDAPUpstream(r, w, oauthHelperWithStorage, ldapUpstream, ) })) } func handleAuthRequestForLDAPUpstream( r *http.Request, w http.ResponseWriter, oauthHelper fosite.OAuth2Provider, ldapUpstream provider.UpstreamLDAPIdentityProviderI, ) error { authorizeRequester, created := newAuthorizeRequest(r, w, oauthHelper) if !created { return nil } username := r.Header.Get(CustomUsernameHeaderName) password := r.Header.Get(CustomPasswordHeaderName) if username == "" || password == "" { // Return an error according to OIDC spec 3.1.2.6 (second paragraph). err := errors.WithStack(fosite.ErrAccessDenied.WithHintf("Missing or blank username or password.")) plog.Info("authorize response error", oidc.FositeErrorForLog(err)...) oauthHelper.WriteAuthorizeError(w, authorizeRequester, err) return nil } authenticateResponse, authenticated, err := ldapUpstream.AuthenticateUser(r.Context(), username, password) if err != nil { plog.WarningErr("unexpected error during upstream LDAP authentication", err, "upstreamName", ldapUpstream.GetName()) return httperr.New(http.StatusBadGateway, "unexpected error during upstream authentication") } if !authenticated { plog.Debug("failed upstream LDAP authentication", "upstreamName", ldapUpstream.GetName()) // Return an error according to OIDC spec 3.1.2.6 (second paragraph). err = errors.WithStack(fosite.ErrAccessDenied.WithHintf("Username/password not accepted by LDAP provider.")) plog.Info("authorize response error", oidc.FositeErrorForLog(err)...) oauthHelper.WriteAuthorizeError(w, authorizeRequester, err) return nil } openIDSession := downstreamsession.MakeDownstreamSession( downstreamSubjectFromUpstreamLDAP(ldapUpstream, authenticateResponse), authenticateResponse.User.GetName(), authenticateResponse.User.GetGroups(), ) authorizeResponder, err := oauthHelper.NewAuthorizeResponse(r.Context(), authorizeRequester, openIDSession) if err != nil { plog.Info("authorize response error", oidc.FositeErrorForLog(err)...) 
oauthHelper.WriteAuthorizeError(w, authorizeRequester, err) return nil } oauthHelper.WriteAuthorizeResponse(w, authorizeRequester, authorizeResponder) return nil } func handleAuthRequestForOIDCUpstream( r *http.Request, w http.ResponseWriter, oauthHelper fosite.OAuth2Provider, generateCSRF func() (csrftoken.CSRFToken, error), generateNonce func() (nonce.Nonce, error), generatePKCE func() (pkce.Code, error), oidcUpstream provider.UpstreamOIDCIdentityProviderI, downstreamIssuer string, upstreamStateEncoder oidc.Encoder, cookieCodec oidc.Codec, ) error
csrfValue, nonceValue, pkceValue, err := generateValues(generateCSRF, generateNonce, generatePKCE) if err != nil { plog.Error("authorize generate error", err) return err } csrfFromCookie := readCSRFCookie(r, cookieCodec) if csrfFromCookie != "" { csrfValue = csrfFromCookie } upstreamOAuthConfig := oauth2.Config{ ClientID: oidcUpstream.GetClientID(), Endpoint: oauth2.Endpoint{ AuthURL: oidcUpstream.GetAuthorizationURL().String(), }, RedirectURL: fmt.Sprintf("%s/callback", downstreamIssuer), Scopes: oidcUpstream.GetScopes(), } encodedStateParamValue, err := upstreamStateParam( authorizeRequester, oidcUpstream.GetName(), nonceValue, csrfValue, pkceValue, upstreamStateEncoder, ) if err != nil { plog.Error("authorize upstream state param error", err) return err } if csrfFromCookie == "" { // We did not receive an incoming CSRF cookie, so write a new one. err := addCSRFSetCookieHeader(w, csrfValue, cookieCodec) if err != nil { plog.Error("error setting CSRF cookie", err) return err } } authCodeOptions := []oauth2.AuthCodeOption{ oauth2.AccessTypeOffline, nonceValue.Param(), pkceValue.Challenge(), pkceValue.Method(), } promptParam := r.Form.Get("prompt") if promptParam != "" && oidc.ScopeWasRequested(authorizeRequester, coreosoidc.ScopeOpenID) { authCodeOptions = append(authCodeOptions, oauth2.SetAuthURLParam("prompt", promptParam)) } http.Redirect(w, r, upstreamOAuthConfig.AuthCodeURL( encodedStateParamValue, authCodeOptions..., ), 302, ) return nil } func newAuthorizeRequest(r *http.Request, w http.ResponseWriter, oauthHelper fosite.OAuth2Provider) (fosite.AuthorizeRequester, bool) { authorizeRequester, err := oauthHelper.NewAuthorizeRequest(r.Context(), r) if err != nil { plog.Info("authorize request error", oidc.FositeErrorForLog(err)...) oauthHelper.WriteAuthorizeError(w, authorizeRequester, err) return nil, false } // Automatically grant the openid, offline_access, and pinniped:request-audience scopes, but only if they were requested. // Grant the openid scope (for now) if they asked for it so that `NewAuthorizeResponse` will perform its OIDC validations. // There don't seem to be any validations inside `NewAuthorizeResponse` related to the offline_access scope // at this time, however we will temporarily grant the scope just in case that changes in a future release of fosite. downstreamsession.GrantScopesIfRequested(authorizeRequester) return authorizeRequester, true } func readCSRFCookie(r *http.Request, codec oidc.Decoder) csrftoken.CSRFToken { receivedCSRFCookie, err := r.Cookie(oidc.CSRFCookieName) if err != nil { // Error means that the cookie was not found return "" } var csrfFromCookie csrftoken.CSRFToken err = codec.Decode(oidc.CSRFCookieEncodingName, receivedCSRFCookie.Value, &csrfFromCookie) if err != nil { // We can ignore any errors and just make a new cookie. Hopefully this will // make the user experience better if, for example, the server rotated // cookie signing keys and then a user submitted a very old cookie. return "" } return csrfFromCookie } // Select either an OIDC or an LDAP IDP, or return an error. func chooseUpstreamIDP(idpLister oidc.UpstreamIdentityProvidersLister) (provider.UpstreamOIDCIdentityProviderI, provider.UpstreamLDAPIdentityProvider
{
	authorizeRequester, created := newAuthorizeRequest(r, w, oauthHelper)
	if !created {
		return nil
	}

	now := time.Now()
	_, err := oauthHelper.NewAuthorizeResponse(r.Context(), authorizeRequester, &openid.DefaultSession{
		Claims: &jwt.IDTokenClaims{
			// Temporary claim values to allow `NewAuthorizeResponse` to perform other OIDC validations.
			Subject:     "none",
			AuthTime:    now,
			RequestedAt: now,
		},
	})
	if err != nil {
		plog.Info("authorize response error", oidc.FositeErrorForLog(err)...)
		oauthHelper.WriteAuthorizeError(w, authorizeRequester, err)
		return nil
	}
identifier_body
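The auth_handler.go sample above builds the upstream authorization redirect by combining a state parameter, a nonce, a PKCE challenge, and an optional prompt parameter into the AuthCodeURL call. As a point of comparison only, here is a minimal, self-contained Go sketch of that redirect construction using golang.org/x/oauth2 and the standard library; every endpoint, client ID, and URL below is a placeholder, and the state/nonce handling is deliberately simplified relative to the handler shown in the sample.

// Minimal sketch of building an upstream authorization URL with state,
// nonce, and a PKCE S256 challenge. All identifiers and URLs are placeholders.
package main

import (
	"crypto/rand"
	"crypto/sha256"
	"encoding/base64"
	"fmt"

	"golang.org/x/oauth2"
)

// randomURLSafe returns n random bytes encoded as a URL-safe string.
func randomURLSafe(n int) string {
	b := make([]byte, n)
	_, _ = rand.Read(b)
	return base64.RawURLEncoding.EncodeToString(b)
}

func main() {
	cfg := oauth2.Config{
		ClientID:    "example-client",                                               // placeholder
		Endpoint:    oauth2.Endpoint{AuthURL: "https://idp.example.com/authorize"},  // placeholder
		RedirectURL: "https://issuer.example.com/callback",                          // placeholder
		Scopes:      []string{"openid", "email"},
	}

	state := randomURLSafe(32)    // in the sample this value is encoded and signed
	nonce := randomURLSafe(32)    // echoed back in the upstream ID token
	verifier := randomURLSafe(32) // PKCE code_verifier, kept server-side
	sum := sha256.Sum256([]byte(verifier))
	challenge := base64.RawURLEncoding.EncodeToString(sum[:])

	url := cfg.AuthCodeURL(
		state,
		oauth2.AccessTypeOffline,
		oauth2.SetAuthURLParam("nonce", nonce),
		oauth2.SetAuthURLParam("code_challenge", challenge),
		oauth2.SetAuthURLParam("code_challenge_method", "S256"),
	)
	fmt.Println(url) // the handler in the sample answers with a 302 redirect to this URL
}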
auth_handler.go
a credential ) func NewHandler( downstreamIssuer string, idpLister oidc.UpstreamIdentityProvidersLister, oauthHelperWithoutStorage fosite.OAuth2Provider, oauthHelperWithStorage fosite.OAuth2Provider, generateCSRF func() (csrftoken.CSRFToken, error), generatePKCE func() (pkce.Code, error), generateNonce func() (nonce.Nonce, error), upstreamStateEncoder oidc.Encoder, cookieCodec oidc.Codec, ) http.Handler { return securityheader.Wrap(httperr.HandlerFunc(func(w http.ResponseWriter, r *http.Request) error { if r.Method != http.MethodPost && r.Method != http.MethodGet { // https://openid.net/specs/openid-connect-core-1_0.html#AuthRequest // Authorization Servers MUST support the use of the HTTP GET and POST methods defined in // RFC 2616 [RFC2616] at the Authorization Endpoint. return httperr.Newf(http.StatusMethodNotAllowed, "%s (try GET or POST)", r.Method) } oidcUpstream, ldapUpstream, err := chooseUpstreamIDP(idpLister) if err != nil { plog.WarningErr("authorize upstream config", err) return err } if oidcUpstream != nil { return handleAuthRequestForOIDCUpstream(r, w, oauthHelperWithoutStorage, generateCSRF, generateNonce, generatePKCE, oidcUpstream, downstreamIssuer, upstreamStateEncoder, cookieCodec, ) } return handleAuthRequestForLDAPUpstream(r, w, oauthHelperWithStorage, ldapUpstream, ) })) } func handleAuthRequestForLDAPUpstream( r *http.Request, w http.ResponseWriter, oauthHelper fosite.OAuth2Provider, ldapUpstream provider.UpstreamLDAPIdentityProviderI, ) error { authorizeRequester, created := newAuthorizeRequest(r, w, oauthHelper) if !created { return nil } username := r.Header.Get(CustomUsernameHeaderName) password := r.Header.Get(CustomPasswordHeaderName) if username == "" || password == "" { // Return an error according to OIDC spec 3.1.2.6 (second paragraph). err := errors.WithStack(fosite.ErrAccessDenied.WithHintf("Missing or blank username or password.")) plog.Info("authorize response error", oidc.FositeErrorForLog(err)...) oauthHelper.WriteAuthorizeError(w, authorizeRequester, err) return nil } authenticateResponse, authenticated, err := ldapUpstream.AuthenticateUser(r.Context(), username, password) if err != nil { plog.WarningErr("unexpected error during upstream LDAP authentication", err, "upstreamName", ldapUpstream.GetName()) return httperr.New(http.StatusBadGateway, "unexpected error during upstream authentication") } if !authenticated { plog.Debug("failed upstream LDAP authentication", "upstreamName", ldapUpstream.GetName()) // Return an error according to OIDC spec 3.1.2.6 (second paragraph). err = errors.WithStack(fosite.ErrAccessDenied.WithHintf("Username/password not accepted by LDAP provider.")) plog.Info("authorize response error", oidc.FositeErrorForLog(err)...) oauthHelper.WriteAuthorizeError(w, authorizeRequester, err) return nil } openIDSession := downstreamsession.MakeDownstreamSession( downstreamSubjectFromUpstreamLDAP(ldapUpstream, authenticateResponse), authenticateResponse.User.GetName(), authenticateResponse.User.GetGroups(), ) authorizeResponder, err := oauthHelper.NewAuthorizeResponse(r.Context(), authorizeRequester, openIDSession) if err != nil { plog.Info("authorize response error", oidc.FositeErrorForLog(err)...) oauthHelper.WriteAuthorizeError(w, authorizeRequester, err) return nil } oauthHelper.WriteAuthorizeResponse(w, authorizeRequester, authorizeResponder) return nil } func
( r *http.Request, w http.ResponseWriter, oauthHelper fosite.OAuth2Provider, generateCSRF func() (csrftoken.CSRFToken, error), generateNonce func() (nonce.Nonce, error), generatePKCE func() (pkce.Code, error), oidcUpstream provider.UpstreamOIDCIdentityProviderI, downstreamIssuer string, upstreamStateEncoder oidc.Encoder, cookieCodec oidc.Codec, ) error { authorizeRequester, created := newAuthorizeRequest(r, w, oauthHelper) if !created { return nil } now := time.Now() _, err := oauthHelper.NewAuthorizeResponse(r.Context(), authorizeRequester, &openid.DefaultSession{ Claims: &jwt.IDTokenClaims{ // Temporary claim values to allow `NewAuthorizeResponse` to perform other OIDC validations. Subject: "none", AuthTime: now, RequestedAt: now, }, }) if err != nil { plog.Info("authorize response error", oidc.FositeErrorForLog(err)...) oauthHelper.WriteAuthorizeError(w, authorizeRequester, err) return nil } csrfValue, nonceValue, pkceValue, err := generateValues(generateCSRF, generateNonce, generatePKCE) if err != nil { plog.Error("authorize generate error", err) return err } csrfFromCookie := readCSRFCookie(r, cookieCodec) if csrfFromCookie != "" { csrfValue = csrfFromCookie } upstreamOAuthConfig := oauth2.Config{ ClientID: oidcUpstream.GetClientID(), Endpoint: oauth2.Endpoint{ AuthURL: oidcUpstream.GetAuthorizationURL().String(), }, RedirectURL: fmt.Sprintf("%s/callback", downstreamIssuer), Scopes: oidcUpstream.GetScopes(), } encodedStateParamValue, err := upstreamStateParam( authorizeRequester, oidcUpstream.GetName(), nonceValue, csrfValue, pkceValue, upstreamStateEncoder, ) if err != nil { plog.Error("authorize upstream state param error", err) return err } if csrfFromCookie == "" { // We did not receive an incoming CSRF cookie, so write a new one. err := addCSRFSetCookieHeader(w, csrfValue, cookieCodec) if err != nil { plog.Error("error setting CSRF cookie", err) return err } } authCodeOptions := []oauth2.AuthCodeOption{ oauth2.AccessTypeOffline, nonceValue.Param(), pkceValue.Challenge(), pkceValue.Method(), } promptParam := r.Form.Get("prompt") if promptParam != "" && oidc.ScopeWasRequested(authorizeRequester, coreosoidc.ScopeOpenID) { authCodeOptions = append(authCodeOptions, oauth2.SetAuthURLParam("prompt", promptParam)) } http.Redirect(w, r, upstreamOAuthConfig.AuthCodeURL( encodedStateParamValue, authCodeOptions..., ), 302, ) return nil } func newAuthorizeRequest(r *http.Request, w http.ResponseWriter, oauthHelper fosite.OAuth2Provider) (fosite.AuthorizeRequester, bool) { authorizeRequester, err := oauthHelper.NewAuthorizeRequest(r.Context(), r) if err != nil { plog.Info("authorize request error", oidc.FositeErrorForLog(err)...) oauthHelper.WriteAuthorizeError(w, authorizeRequester, err) return nil, false } // Automatically grant the openid, offline_access, and pinniped:request-audience scopes, but only if they were requested. // Grant the openid scope (for now) if they asked for it so that `NewAuthorizeResponse` will perform its OIDC validations. // There don't seem to be any validations inside `NewAuthorizeResponse` related to the offline_access scope // at this time, however we will temporarily grant the scope just in case that changes in a future release of fosite. 
downstreamsession.GrantScopesIfRequested(authorizeRequester) return authorizeRequester, true } func readCSRFCookie(r *http.Request, codec oidc.Decoder) csrftoken.CSRFToken { receivedCSRFCookie, err := r.Cookie(oidc.CSRFCookieName) if err != nil { // Error means that the cookie was not found return "" } var csrfFromCookie csrftoken.CSRFToken err = codec.Decode(oidc.CSRFCookieEncodingName, receivedCSRFCookie.Value, &csrfFromCookie) if err != nil { // We can ignore any errors and just make a new cookie. Hopefully this will // make the user experience better if, for example, the server rotated // cookie signing keys and then a user submitted a very old cookie. return "" } return csrfFromCookie } // Select either an OIDC or an LDAP IDP, or return an error. func chooseUpstreamIDP(idpLister oidc.UpstreamIdentityProvidersLister) (provider.UpstreamOIDCIdentityProviderI, provider.UpstreamLDAPIdentityProvider
handleAuthRequestForOIDCUpstream
identifier_name
auth_handler.go
a credential ) func NewHandler( downstreamIssuer string, idpLister oidc.UpstreamIdentityProvidersLister, oauthHelperWithoutStorage fosite.OAuth2Provider, oauthHelperWithStorage fosite.OAuth2Provider, generateCSRF func() (csrftoken.CSRFToken, error), generatePKCE func() (pkce.Code, error), generateNonce func() (nonce.Nonce, error), upstreamStateEncoder oidc.Encoder, cookieCodec oidc.Codec, ) http.Handler { return securityheader.Wrap(httperr.HandlerFunc(func(w http.ResponseWriter, r *http.Request) error { if r.Method != http.MethodPost && r.Method != http.MethodGet { // https://openid.net/specs/openid-connect-core-1_0.html#AuthRequest // Authorization Servers MUST support the use of the HTTP GET and POST methods defined in // RFC 2616 [RFC2616] at the Authorization Endpoint. return httperr.Newf(http.StatusMethodNotAllowed, "%s (try GET or POST)", r.Method) } oidcUpstream, ldapUpstream, err := chooseUpstreamIDP(idpLister) if err != nil { plog.WarningErr("authorize upstream config", err) return err } if oidcUpstream != nil { return handleAuthRequestForOIDCUpstream(r, w, oauthHelperWithoutStorage, generateCSRF, generateNonce, generatePKCE, oidcUpstream, downstreamIssuer, upstreamStateEncoder, cookieCodec, ) } return handleAuthRequestForLDAPUpstream(r, w, oauthHelperWithStorage, ldapUpstream, ) })) } func handleAuthRequestForLDAPUpstream( r *http.Request, w http.ResponseWriter, oauthHelper fosite.OAuth2Provider, ldapUpstream provider.UpstreamLDAPIdentityProviderI, ) error {
} username := r.Header.Get(CustomUsernameHeaderName) password := r.Header.Get(CustomPasswordHeaderName) if username == "" || password == "" { // Return an error according to OIDC spec 3.1.2.6 (second paragraph). err := errors.WithStack(fosite.ErrAccessDenied.WithHintf("Missing or blank username or password.")) plog.Info("authorize response error", oidc.FositeErrorForLog(err)...) oauthHelper.WriteAuthorizeError(w, authorizeRequester, err) return nil } authenticateResponse, authenticated, err := ldapUpstream.AuthenticateUser(r.Context(), username, password) if err != nil { plog.WarningErr("unexpected error during upstream LDAP authentication", err, "upstreamName", ldapUpstream.GetName()) return httperr.New(http.StatusBadGateway, "unexpected error during upstream authentication") } if !authenticated { plog.Debug("failed upstream LDAP authentication", "upstreamName", ldapUpstream.GetName()) // Return an error according to OIDC spec 3.1.2.6 (second paragraph). err = errors.WithStack(fosite.ErrAccessDenied.WithHintf("Username/password not accepted by LDAP provider.")) plog.Info("authorize response error", oidc.FositeErrorForLog(err)...) oauthHelper.WriteAuthorizeError(w, authorizeRequester, err) return nil } openIDSession := downstreamsession.MakeDownstreamSession( downstreamSubjectFromUpstreamLDAP(ldapUpstream, authenticateResponse), authenticateResponse.User.GetName(), authenticateResponse.User.GetGroups(), ) authorizeResponder, err := oauthHelper.NewAuthorizeResponse(r.Context(), authorizeRequester, openIDSession) if err != nil { plog.Info("authorize response error", oidc.FositeErrorForLog(err)...) oauthHelper.WriteAuthorizeError(w, authorizeRequester, err) return nil } oauthHelper.WriteAuthorizeResponse(w, authorizeRequester, authorizeResponder) return nil } func handleAuthRequestForOIDCUpstream( r *http.Request, w http.ResponseWriter, oauthHelper fosite.OAuth2Provider, generateCSRF func() (csrftoken.CSRFToken, error), generateNonce func() (nonce.Nonce, error), generatePKCE func() (pkce.Code, error), oidcUpstream provider.UpstreamOIDCIdentityProviderI, downstreamIssuer string, upstreamStateEncoder oidc.Encoder, cookieCodec oidc.Codec, ) error { authorizeRequester, created := newAuthorizeRequest(r, w, oauthHelper) if !created { return nil } now := time.Now() _, err := oauthHelper.NewAuthorizeResponse(r.Context(), authorizeRequester, &openid.DefaultSession{ Claims: &jwt.IDTokenClaims{ // Temporary claim values to allow `NewAuthorizeResponse` to perform other OIDC validations. Subject: "none", AuthTime: now, RequestedAt: now, }, }) if err != nil { plog.Info("authorize response error", oidc.FositeErrorForLog(err)...) 
oauthHelper.WriteAuthorizeError(w, authorizeRequester, err) return nil } csrfValue, nonceValue, pkceValue, err := generateValues(generateCSRF, generateNonce, generatePKCE) if err != nil { plog.Error("authorize generate error", err) return err } csrfFromCookie := readCSRFCookie(r, cookieCodec) if csrfFromCookie != "" { csrfValue = csrfFromCookie } upstreamOAuthConfig := oauth2.Config{ ClientID: oidcUpstream.GetClientID(), Endpoint: oauth2.Endpoint{ AuthURL: oidcUpstream.GetAuthorizationURL().String(), }, RedirectURL: fmt.Sprintf("%s/callback", downstreamIssuer), Scopes: oidcUpstream.GetScopes(), } encodedStateParamValue, err := upstreamStateParam( authorizeRequester, oidcUpstream.GetName(), nonceValue, csrfValue, pkceValue, upstreamStateEncoder, ) if err != nil { plog.Error("authorize upstream state param error", err) return err } if csrfFromCookie == "" { // We did not receive an incoming CSRF cookie, so write a new one. err := addCSRFSetCookieHeader(w, csrfValue, cookieCodec) if err != nil { plog.Error("error setting CSRF cookie", err) return err } } authCodeOptions := []oauth2.AuthCodeOption{ oauth2.AccessTypeOffline, nonceValue.Param(), pkceValue.Challenge(), pkceValue.Method(), } promptParam := r.Form.Get("prompt") if promptParam != "" && oidc.ScopeWasRequested(authorizeRequester, coreosoidc.ScopeOpenID) { authCodeOptions = append(authCodeOptions, oauth2.SetAuthURLParam("prompt", promptParam)) } http.Redirect(w, r, upstreamOAuthConfig.AuthCodeURL( encodedStateParamValue, authCodeOptions..., ), 302, ) return nil } func newAuthorizeRequest(r *http.Request, w http.ResponseWriter, oauthHelper fosite.OAuth2Provider) (fosite.AuthorizeRequester, bool) { authorizeRequester, err := oauthHelper.NewAuthorizeRequest(r.Context(), r) if err != nil { plog.Info("authorize request error", oidc.FositeErrorForLog(err)...) oauthHelper.WriteAuthorizeError(w, authorizeRequester, err) return nil, false } // Automatically grant the openid, offline_access, and pinniped:request-audience scopes, but only if they were requested. // Grant the openid scope (for now) if they asked for it so that `NewAuthorizeResponse` will perform its OIDC validations. // There don't seem to be any validations inside `NewAuthorizeResponse` related to the offline_access scope // at this time, however we will temporarily grant the scope just in case that changes in a future release of fosite. downstreamsession.GrantScopesIfRequested(authorizeRequester) return authorizeRequester, true } func readCSRFCookie(r *http.Request, codec oidc.Decoder) csrftoken.CSRFToken { receivedCSRFCookie, err := r.Cookie(oidc.CSRFCookieName) if err != nil { // Error means that the cookie was not found return "" } var csrfFromCookie csrftoken.CSRFToken err = codec.Decode(oidc.CSRFCookieEncodingName, receivedCSRFCookie.Value, &csrfFromCookie) if err != nil { // We can ignore any errors and just make a new cookie. Hopefully this will // make the user experience better if, for example, the server rotated // cookie signing keys and then a user submitted a very old cookie. return "" } return csrfFromCookie } // Select either an OIDC or an LDAP IDP, or return an error. func chooseUpstreamIDP(idpLister oidc.UpstreamIdentityProvidersLister) (provider.UpstreamOIDCIdentityProviderI, provider.UpstreamLDAPIdentityProviderI,
authorizeRequester, created := newAuthorizeRequest(r, w, oauthHelper)
	if !created {
		return nil
random_line_split
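The same sample also shows the CSRF-cookie pattern used before redirecting upstream: reuse the value from an incoming cookie if it decodes cleanly, otherwise generate a fresh value and set the cookie, and carry the value inside the state parameter so the callback can compare the two. Below is a minimal sketch of that decide-then-set step, assuming only net/http and a made-up cookie name; the real handler signs and encodes the value with a codec rather than storing it raw.

// Minimal sketch: reuse an incoming CSRF cookie value when present,
// otherwise mint a new value and set the cookie. Cookie name and encoding
// are simplified placeholders.
package main

import (
	"crypto/rand"
	"encoding/base64"
	"net/http"
)

const csrfCookieName = "__Host-csrf" // placeholder name

func csrfValueForRequest(w http.ResponseWriter, r *http.Request) string {
	if c, err := r.Cookie(csrfCookieName); err == nil && c.Value != "" {
		return c.Value // reuse the existing value so concurrent logins agree
	}
	b := make([]byte, 32)
	_, _ = rand.Read(b)
	v := base64.RawURLEncoding.EncodeToString(b)
	http.SetCookie(w, &http.Cookie{
		Name:     csrfCookieName,
		Value:    v,
		Path:     "/",
		Secure:   true,
		HttpOnly: true,
		SameSite: http.SameSiteLaxMode,
	})
	return v
}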
constants.js
.set('number', 'number') .set('string', 'string') .set('weekday', 'weekday') .set('phoneNumber', 'phoneNumber') .set('HH:MM', 'HH:MM') .set('base64', 'base64'); /** Weekdays accepted in the attachment for the field `type` of `weekday`. */ const weekdays = new Map() .set('monday', 'monday') .set('tuesday', 'tuesday') .set('wednesday', 'wednesday') .set('thursday', 'thursday') .set('friday', 'friday') .set('saturday', 'saturday') .set('sunday', 'sunday'); /** * Statuses which are allowed for the `activity`, `subscription` or `office`. */ const activityStatuses = new Map() .set('PENDING', 'PENDING') .set('CONFIRMED', 'CONFIRMED') .set('CANCELLED', 'CANCELLED'); /** * Values of `canEdit` which for validating the `canEditRule`. */ const canEditRules = new Map() .set('ALL', 'ALL') .set('NONE', 'NONE') .set('ADMIN', 'ADMIN') .set('CREATOR', 'CREATOR') .set('EMPLOYEE', 'EMPLOYEE'); const templateFields = new Map() .set('name', 'name') .set('statusOnCreate', 'statusOnCreate') .set('canEditRule', 'canEditRule') .set('venue', 'venue') .set('schedule', 'schedule') .set('comment', 'comment') .set('attachment', 'attachment') .set('hidden', 'hidden'); /** Used while creating comments, to handle vowels correctly. */ const vowels = new Map() .set('a', 'a') .set('e', 'e') .set('i', 'i') .set('o', 'o') .set('u', 'u'); const createBodyFields = new Map() .set('timestamp', 'timestamp') .set('geopoint', 'geopoint') .set('template', 'template') .set('activityName', 'activityName') .set('share', 'share') .set('venue', 'venue') .set('schedule', 'schedule') .set('attachment', 'attachment'); const updateBodyFields = new Map() .set('timestamp', 'timestamp') .set('geopoint', 'geopoint') .set('activityId', 'activityId') .set('schedule', 'schedule') .set('venue', 'venue'); const shareBodyFields = new Map() .set('timestamp', 'timestamp') .set('geopoint', 'geopoint') .set('activityId', 'activityId') .set('share', 'share'); const changeStatusBodyFields = new Map() .set('timestamp', 'timestamp') .set('geopoint', 'geopoint') .set('activityId', 'activityId') .set('status', 'status'); const commentBodyFields = new Map() .set('timestamp', 'timestamp') .set('geopoint', 'geopoint') .set('activityId', 'activityId') .set('comment', 'comment'); const removeBodyFields = new Map() .set('timestamp', 'timestamp') .set('geopoint', 'geopoint') .set('activityId', 'activityId') .set('remove', 'remove'); const phoneNumberUpdateBodyFields = new Map() .set('timestamp', 'timestamp') .set('geopoint', 'geopoint') .set('phoneNumber', 'phoneNumber'); const httpsActions = { share: 'share', update: 'update', create: 'create', comment: 'comment', install: 'install', signup: 'signup', changeStatus: 'change-status', updatePhoneNumber: 'update-phone-number', videoPlay: 'video-play', productView: 'product-view', branchView: 'branch-view', webapp: 'webapp', }; const reportingActions = { clientError: 'clientError', authDeleted: 'authDeleted', authChanged: 'authChanged', authDisabled: 'authDisabled', usedCustomClaims: 'usedCustomClaims', }; const sendGridTemplateIds = { dsr: 'd-e7a922e42d67456dafcc2926731250a0', leave: 'd-ae3a31066b0f447bbf8661570b4dc719', payroll: 'd-cf7785c6a4a04285b1b2cee7d0227052', signUps: 'd-a73b2f579c8746758ba2753fbb0341df', enquiry: 'd-9a2c07b40a644b1b97a5345bbc984c4c', installs: 'd-835f877b46bb4cc8aad6df8d735e27a1', dutyRoster: 'd-9b9c44018c3b41a8805189476a38c172', footprints: 'd-90095557c1c54de1a153626bb0fbe03d', expenseClaim: 'd-ae3a31066b0f447bbf8661570b4dc719', activityReports: 'd-2972abe4d32443fab45c75d901ffb02a', 
verificationEmail: 'd-7645b372912a490eb2062cf5cc076041', dailyStatusReport: 'd-a48d570e46914d0d8989f77a844a26e9', }; const templatesWithNumber = new Set() .add('bill') .add('invoice') .add('sales order') .add('purchase order'); const templatesSet = new Set() .add('on duty') .add('admin') .add('branch') .add('check-in') .add('customer-type') .add('customer') .add('department') .add('dsr') .add('duty roster') .add('employee') .add('expense claim') .add('expense-type') .add('leave-type') .add('leave') .add('office') .add('product') .add('recipient') .add('subscription') .add('supplier-type') .add('supplier') .add('tour plan') .add('enquiry') .add('material') .add('bill') .add('invoice') .add('sales order') .add('purchase order') .add('payment') .add('collection'); /** * Creating a Set and not using Moment's `moment.tz.names()` * because for iterating the array each time to find * if a timezone exists or not is `O(n^2)`. * Fetching the value from a Set is `O(1)` */ const timezonesSet = new Set() .add('Africa/Abidjan') .add('Africa/Accra') .add('Africa/Addis_Ababa') .add('Africa/Algiers') .add('Africa/Asmara') .add('Africa/Asmera') .add('Africa/Bamako') .add('Africa/Bangui') .add('Africa/Banjul') .add('Africa/Bissau') .add('Africa/Blantyre') .add('Africa/Brazzaville') .add('Africa/Bujumbura') .add('Africa/Cairo') .add('Africa/Casablanca') .add('Africa/Ceuta') .add('Africa/Conakry') .add('Africa/Dakar') .add('Africa/Dar_es_Salaam') .add('Africa/Djibouti') .add('Africa/Douala') .add('Africa/El_Aaiun') .add('Africa/Freetown') .add('Africa/Gaborone') .add('Africa/Harare') .add('Africa/Johannesburg') .add('Africa/Juba') .add('Africa/Kampala') .add('Africa/Khartoum') .add('Africa/Kigali') .add('Africa/Kinshasa') .add('Africa/Lagos') .add('Africa/Librev
.set('email', 'email')
random_line_split
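The constants.js sample explains why it stores timezones in a Set: membership checks are O(1), whereas scanning moment.tz.names() for every validation would be linear per lookup. The same trade-off, illustrated in Go with a map used as a set; the timezone list is truncated to a few entries purely for illustration.

// Small illustration of O(1) membership via a map-as-set, mirroring the
// rationale in the constants.js comment. Entries are a truncated sample.
package main

import "fmt"

var timezones = map[string]struct{}{
	"Africa/Abidjan": {},
	"Africa/Accra":   {},
	"Africa/Cairo":   {},
}

// isValidTimezone reports whether tz is in the set, in O(1) average time.
func isValidTimezone(tz string) bool {
	_, ok := timezones[tz]
	return ok
}

func main() {
	fmt.Println(isValidTimezone("Africa/Cairo")) // true
	fmt.Println(isValidTimezone("Mars/Olympus")) // false
}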
mod.rs
o_order.iter(); if let Some(&start) = iter.next() { ret.insert(start, self.cctx.mk_true()); for &n in iter { let reach_cond = self.cctx.mk_or_from_iter( // manually restrict to slice self.graph .edges_directed(n, Incoming) .filter(|e| slice.edges.contains(e.id())) .map(|e| { let src_cond = ret[&e.source()]; match (&self.graph[e.source()], e.weight()) { (&CfgNode::Condition(c), CfgEdge::True) => { self.cctx.mk_and(src_cond, self.cctx.mk_var(c)) } (&CfgNode::Condition(c), CfgEdge::False) => self .cctx .mk_and(src_cond, self.cctx.mk_not(self.cctx.mk_var(c))), (_, CfgEdge::True) => src_cond, (_, CfgEdge::False) => self.cctx.mk_false(), } }), ); let _old = ret.insert(n, reach_cond); debug_assert!(_old.is_none()); } } ret } /// Transforms the loop into a single-entry loop. /// Returns the new loop header. fn funnel_abnormal_entries(&mut self, header: NodeIndex, loop_nodes: &NodeSet) -> NodeIndex { let mut entry_map = HashMap::new(); for n in loop_nodes { for e in self.graph.edges_directed(n, Incoming) { if !loop_nodes.contains(e.source()) { entry_map.entry(n).or_insert(Vec::new()).push(e.id()); } } } // loop must be reachable, so the header must have entries let header_entries = entry_map.remove(&header).unwrap(); debug_assert!(!header_entries.is_empty()); let abnormal_entry_map = entry_map; if abnormal_entry_map.is_empty() { // no abnormal entries return header; } let abnormal_entry_iter = (1..).zip(&abnormal_entry_map); let struct_var = self.actx.mk_fresh_var(); // make condition cascade let new_header = { let abnormal_entry_iter = abnormal_entry_iter.clone().map(|(n, (&t, _))| (n, t)); let dummy_preheader = self.graph.add_node(CfgNode::Dummy("loop \"preheader\"")); let mut prev_cascade_node = dummy_preheader; let mut prev_entry_target = header; let mut prev_entry_num = 0; // we make the condition node for the *previous* entry target b/c // the current one might be the last one, which shouldn't get a // condition node because it's the only possible target for (entry_num, entry_target) in abnormal_entry_iter { let prev_cond_eq = self .cctx .new_var(self.actx.mk_cond_equals(&struct_var, prev_entry_num)); let cascade_node = self.graph.add_node(CfgNode::Condition(prev_cond_eq)); self.graph .add_edge(prev_cascade_node, cascade_node, CfgEdge::False); self.graph .add_edge(cascade_node, prev_entry_target, CfgEdge::True); let struct_reset = self.graph.add_node(CfgNode::Code(AstNodeC::BasicBlock( self.actx.mk_var_assign(&struct_var, 0), ))); self.graph .add_edge(struct_reset, entry_target, CfgEdge::True); prev_cascade_node = cascade_node; prev_entry_target = struct_reset; prev_entry_num = entry_num; } self.graph .add_edge(prev_cascade_node, prev_entry_target, CfgEdge::False); // we always add an edge from dummy_preheader let new_header = self.graph.neighbors(dummy_preheader).next().unwrap(); self.graph.remove_node(dummy_preheader); new_header }; // redirect entries for (entry_num, entry_edges) in iter::once((0, &header_entries)).chain(abnormal_entry_iter.map(|(n, (_, e))| (n, e))) { let struct_assign = self.graph.add_node(CfgNode::Code(AstNodeC::BasicBlock( self.actx.mk_var_assign(&struct_var, entry_num), ))); self.graph .add_edge(struct_assign, new_header, CfgEdge::True); for &entry_edge in entry_edges { graph_utils::retarget_edge(&mut self.graph, entry_edge, struct_assign); } } new_header } /// Incrementally adds nodes dominated by the loop to the loop until /// there's only one successor or there are no more nodes to add. 
fn refine_loop(&self, loop_nodes: &mut NodeSet, succ_nodes: &mut NodeSet) -> () { // reuse this `NodeSet` so we avoid allocating let mut new_nodes = NodeSet::new(); while succ_nodes.len() > 1 { for n in &*succ_nodes { if self .graph .neighbors_directed(n, Incoming) .all(|pred| loop_nodes.contains(pred)) { // post-pone removal from `succ_nodes` b/c rust ownership loop_nodes.insert(n); new_nodes.extend(self.graph.neighbors(n).filter(|&u| !loop_nodes.contains(u))); } } // do the removal succ_nodes.difference_with(&loop_nodes); if new_nodes.is_empty() { break; } succ_nodes.union_with(&new_nodes); new_nodes.clear(); } } /// Transforms the loop so that all loop exits are `break`. /// Returns the new loop successor. fn funnel_abnormal_exits( &mut self, loop_nodes: &mut NodeSet, final_succ: NodeIndex, abn_succ_nodes: &NodeSet, ) -> NodeIndex { // replace "normal" exit edges with "break" { let exit_edges: Vec<_> = graph_utils::edges_from_region_to_node(&self.graph, &loop_nodes, final_succ) .collect(); for exit_edge in exit_edges { let break_node = self.graph.add_node(CfgNode::Code(AstNodeC::Break)); graph_utils::retarget_edge(&mut self.graph, exit_edge, break_node); loop_nodes.insert(break_node); } } if abn_succ_nodes.is_empty() { // no abnormal exits return final_succ; } let abn_succ_iter = (1..).zip(abn_succ_nodes); let struct_var = self.actx.mk_fresh_var_zeroed(); // replace abnormal exit edges with "break" for (exit_num, exit_target) in abn_succ_iter.clone() { let exit_edges: Vec<_> = graph_utils::edges_from_region_to_node(&self.graph, &loop_nodes, exit_target) .collect(); for exit_edge in exit_edges { let break_node = self.graph.add_node(CfgNode::Code(AstNodeC::Seq(vec![ AstNodeC::BasicBlock(self.actx.mk_var_assign(&struct_var, exit_num)), AstNodeC::Break, ]))); graph_utils::retarget_edge(&mut self.graph, exit_edge, break_node); loop_nodes.insert(break_node); } } let mut cur_succ = final_succ; // make condition cascade for (exit_num, exit_target) in abn_succ_iter.clone() { let cond = self .cctx .new_var(self.actx.mk_cond_equals(&struct_var, exit_num)); let cascade_node = self.graph.add_node(CfgNode::Condition(cond)); self.graph .add_edge(cascade_node, exit_target, CfgEdge::True); self.graph.add_edge(cascade_node, cur_succ, CfgEdge::False); cur_succ = cascade_node; } cur_succ } } struct RegionAstContext<'cd, A>(PhantomData<(&'cd (), A)>); impl<'cd, A: AstContext> AstContext for RegionAstContext<'cd, A> { type Block = AstNode<'cd, A>; type Condition = A::Condition; type BoolVariable = A::BoolVariable; type Variable = A::Variable; } impl<'cd, A: AstContext> RegionAstContext<'cd, A> { fn export(ast: AstNode<'cd, Self>) -> AstNode<'cd, A> { use self::AstNodeC::*; match ast { BasicBlock(b) => b, Seq(seq) => Seq(seq.into_iter().map(Self::export).collect()), Cond(c, t, oe) => Cond( c, Box::new(Self::export(*t)), oe.map(|e| Box::new(Self::export(*e))), ), Loop(t, b) => Loop(t, Box::new(Self::export(*b))), Break => Break, Switch(v, cases, default) => Switch( v, cases .into_iter() .map(|(vs, a)| (vs, Self::export(a))) .collect(), Box::new(Self::export(*default)), ), } } } pub fn mk_code_node<A: AstContext>(block: A::Block) -> CfgNode<'static, A> {
CfgNode::Code(AstNodeC::BasicBlock(block))
random_line_split
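The mod.rs sample computes a "reaching condition" for every node of a graph slice: walking nodes in topological order, each node's condition is the OR, over its incoming edges, of (source condition AND the edge's branch condition). A small sketch of that recurrence in Go, with conditions kept as plain strings and a hand-written four-node example graph; all names are illustrative, not taken from the sample.

// Sketch of the reaching-condition recurrence over a DAG given in
// topological order. Conditions are strings purely for illustration.
package main

import (
	"fmt"
	"strings"
)

type edge struct {
	src  string
	cond string // "" means an unconditional edge
}

func reachingConditions(topo []string, incoming map[string][]edge) map[string]string {
	ret := map[string]string{topo[0]: "true"} // the entry node is always reached
	for _, n := range topo[1:] {
		var terms []string
		for _, e := range incoming[n] {
			t := ret[e.src]
			if e.cond != "" {
				t = "(" + t + " && " + e.cond + ")" // source condition AND edge condition
			}
			terms = append(terms, t)
		}
		ret[n] = strings.Join(terms, " || ") // OR over all incoming edges
	}
	return ret
}

func main() {
	topo := []string{"A", "B", "C", "D"}
	incoming := map[string][]edge{
		"B": {{src: "A", cond: "c1"}},
		"C": {{src: "A", cond: "!c1"}},
		"D": {{src: "B"}, {src: "C"}},
	}
	fmt.Println(reachingConditions(topo, incoming))
}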
mod.rs
.cctx, region_graph, old_new_map[&header], ); let ast = dedup_conds::run(&mut self.actx, self.cctx, &region_conditions, ast); let ast = RegionAstContext::<A>::export(ast); refinement::simplify_ast_node::<A>(self.cctx, ast).unwrap_or_default() } /// Computes the reaching condition for every node in the given graph slice. fn reaching_conditions( &self, slice: &graph_utils::GraphSlice<NodeIndex, EdgeIndex>, ) -> HashMap<NodeIndex, Condition<'cd, A>> { // {Node, Edge}Filtered don't implement IntoNeighborsDirected :( // https://github.com/bluss/petgraph/pull/219 // Also EdgeFiltered<Reversed<_>, _> isn't Into{Neighbors, Edges} // because Reversed<_> isn't IntoEdges let mut ret = HashMap::with_capacity(slice.topo_order.len()); let mut iter = slice.topo_order.iter(); if let Some(&start) = iter.next() { ret.insert(start, self.cctx.mk_true()); for &n in iter { let reach_cond = self.cctx.mk_or_from_iter( // manually restrict to slice self.graph .edges_directed(n, Incoming) .filter(|e| slice.edges.contains(e.id())) .map(|e| { let src_cond = ret[&e.source()]; match (&self.graph[e.source()], e.weight()) { (&CfgNode::Condition(c), CfgEdge::True) => { self.cctx.mk_and(src_cond, self.cctx.mk_var(c)) } (&CfgNode::Condition(c), CfgEdge::False) => self .cctx .mk_and(src_cond, self.cctx.mk_not(self.cctx.mk_var(c))), (_, CfgEdge::True) => src_cond, (_, CfgEdge::False) => self.cctx.mk_false(), } }), ); let _old = ret.insert(n, reach_cond); debug_assert!(_old.is_none()); } } ret } /// Transforms the loop into a single-entry loop. /// Returns the new loop header. fn funnel_abnormal_entries(&mut self, header: NodeIndex, loop_nodes: &NodeSet) -> NodeIndex { let mut entry_map = HashMap::new(); for n in loop_nodes { for e in self.graph.edges_directed(n, Incoming) { if !loop_nodes.contains(e.source()) { entry_map.entry(n).or_insert(Vec::new()).push(e.id()); } } } // loop must be reachable, so the header must have entries let header_entries = entry_map.remove(&header).unwrap(); debug_assert!(!header_entries.is_empty()); let abnormal_entry_map = entry_map; if abnormal_entry_map.is_empty() { // no abnormal entries return header; } let abnormal_entry_iter = (1..).zip(&abnormal_entry_map); let struct_var = self.actx.mk_fresh_var(); // make condition cascade let new_header = { let abnormal_entry_iter = abnormal_entry_iter.clone().map(|(n, (&t, _))| (n, t)); let dummy_preheader = self.graph.add_node(CfgNode::Dummy("loop \"preheader\"")); let mut prev_cascade_node = dummy_preheader; let mut prev_entry_target = header; let mut prev_entry_num = 0; // we make the condition node for the *previous* entry target b/c // the current one might be the last one, which shouldn't get a // condition node because it's the only possible target for (entry_num, entry_target) in abnormal_entry_iter { let prev_cond_eq = self .cctx .new_var(self.actx.mk_cond_equals(&struct_var, prev_entry_num)); let cascade_node = self.graph.add_node(CfgNode::Condition(prev_cond_eq)); self.graph .add_edge(prev_cascade_node, cascade_node, CfgEdge::False); self.graph .add_edge(cascade_node, prev_entry_target, CfgEdge::True); let struct_reset = self.graph.add_node(CfgNode::Code(AstNodeC::BasicBlock( self.actx.mk_var_assign(&struct_var, 0), ))); self.graph .add_edge(struct_reset, entry_target, CfgEdge::True); prev_cascade_node = cascade_node; prev_entry_target = struct_reset; prev_entry_num = entry_num; } self.graph .add_edge(prev_cascade_node, prev_entry_target, CfgEdge::False); // we always add an edge from dummy_preheader let new_header = 
self.graph.neighbors(dummy_preheader).next().unwrap(); self.graph.remove_node(dummy_preheader); new_header }; // redirect entries for (entry_num, entry_edges) in iter::once((0, &header_entries)).chain(abnormal_entry_iter.map(|(n, (_, e))| (n, e))) { let struct_assign = self.graph.add_node(CfgNode::Code(AstNodeC::BasicBlock( self.actx.mk_var_assign(&struct_var, entry_num), ))); self.graph .add_edge(struct_assign, new_header, CfgEdge::True); for &entry_edge in entry_edges { graph_utils::retarget_edge(&mut self.graph, entry_edge, struct_assign); } } new_header } /// Incrementally adds nodes dominated by the loop to the loop until /// there's only one successor or there are no more nodes to add. fn refine_loop(&self, loop_nodes: &mut NodeSet, succ_nodes: &mut NodeSet) -> () { // reuse this `NodeSet` so we avoid allocating let mut new_nodes = NodeSet::new(); while succ_nodes.len() > 1 { for n in &*succ_nodes { if self .graph .neighbors_directed(n, Incoming) .all(|pred| loop_nodes.contains(pred)) { // post-pone removal from `succ_nodes` b/c rust ownership loop_nodes.insert(n); new_nodes.extend(self.graph.neighbors(n).filter(|&u| !loop_nodes.contains(u))); } } // do the removal succ_nodes.difference_with(&loop_nodes); if new_nodes.is_empty() { break; } succ_nodes.union_with(&new_nodes); new_nodes.clear(); } } /// Transforms the loop so that all loop exits are `break`. /// Returns the new loop successor. fn funnel_abnormal_exits( &mut self, loop_nodes: &mut NodeSet, final_succ: NodeIndex, abn_succ_nodes: &NodeSet, ) -> NodeIndex { // replace "normal" exit edges with "break" { let exit_edges: Vec<_> = graph_utils::edges_from_region_to_node(&self.graph, &loop_nodes, final_succ) .collect(); for exit_edge in exit_edges { let break_node = self.graph.add_node(CfgNode::Code(AstNodeC::Break)); graph_utils::retarget_edge(&mut self.graph, exit_edge, break_node); loop_nodes.insert(break_node); } } if abn_succ_nodes.is_empty() { // no abnormal exits return final_succ; } let abn_succ_iter = (1..).zip(abn_succ_nodes); let struct_var = self.actx.mk_fresh_var_zeroed(); // replace abnormal exit edges with "break" for (exit_num, exit_target) in abn_succ_iter.clone() { let exit_edges: Vec<_> = graph_utils::edges_from_region_to_node(&self.graph, &loop_nodes, exit_target) .collect(); for exit_edge in exit_edges { let break_node = self.graph.add_node(CfgNode::Code(AstNodeC::Seq(vec![ AstNodeC::BasicBlock(self.actx.mk_var_assign(&struct_var, exit_num)), AstNodeC::Break, ]))); graph_utils::retarget_edge(&mut self.graph, exit_edge, break_node); loop_nodes.insert(break_node); } } let mut cur_succ = final_succ; // make condition cascade for (exit_num, exit_target) in abn_succ_iter.clone() { let cond = self .cctx .new_var(self.actx.mk_cond_equals(&struct_var, exit_num)); let cascade_node = self.graph.add_node(CfgNode::Condition(cond)); self.graph .add_edge(cascade_node, exit_target, CfgEdge::True); self.graph.add_edge(cascade_node, cur_succ, CfgEdge::False); cur_succ = cascade_node; } cur_succ } } struct RegionAstContext<'cd, A>(PhantomData<(&'cd (), A)>); impl<'cd, A: AstContext> AstContext for RegionAstContext<'cd, A> { type Block = AstNode<'cd, A>; type Condition = A::Condition; type BoolVariable = A::BoolVariable; type Variable = A::Variable; } impl<'cd, A: AstContext> RegionAstContext<'cd, A> { fn
export
identifier_name
mod.rs
.edges.len()); let mut old_new_map = HashMap::with_capacity(slice.topo_order.len()); let mut region_conditions = Vec::new(); // move all region nodes into `region_graph`. for &old_n in &slice.topo_order { let cfg_node = mem::replace(&mut self.graph[old_n], CfgNode::Dummy("sasr replaced")); if let CfgNode::Condition(c) = cfg_node { // record all conditions in the region region_conditions.push(c); } let new_node = match cfg_node { // refinement needs to be able to see `Break`s CfgNode::Code(AstNodeC::Break) => Some(AstNodeC::Break), // other nodes should be opaque CfgNode::Code(ast) => Some(AstNodeC::BasicBlock(ast)), _ => None, }; let new_n = region_graph.add_node((reaching_conds[&old_n], new_node)); old_new_map.insert(old_n, new_n); } let old_new_map = old_new_map; // copy over edges for e in &slice.edges { let (src, dst) = self.graph.edge_endpoints(e).unwrap(); region_graph.add_edge(old_new_map[&src], old_new_map[&dst], ()); } // remove region nodes from the cfg for &n in &slice.topo_order { // we don't want to remove `header` since that will also remove // incoming edges, which we need to keep if n != header { let _removed = self.graph.remove_node(n); debug_assert!(_removed.is_some()); } } let ast = refinement::refine::<RegionAstContext<A>>( self.cctx, region_graph, old_new_map[&header], ); let ast = dedup_conds::run(&mut self.actx, self.cctx, &region_conditions, ast); let ast = RegionAstContext::<A>::export(ast); refinement::simplify_ast_node::<A>(self.cctx, ast).unwrap_or_default() } /// Computes the reaching condition for every node in the given graph slice. fn reaching_conditions( &self, slice: &graph_utils::GraphSlice<NodeIndex, EdgeIndex>, ) -> HashMap<NodeIndex, Condition<'cd, A>> { // {Node, Edge}Filtered don't implement IntoNeighborsDirected :( // https://github.com/bluss/petgraph/pull/219 // Also EdgeFiltered<Reversed<_>, _> isn't Into{Neighbors, Edges} // because Reversed<_> isn't IntoEdges let mut ret = HashMap::with_capacity(slice.topo_order.len()); let mut iter = slice.topo_order.iter(); if let Some(&start) = iter.next() { ret.insert(start, self.cctx.mk_true()); for &n in iter { let reach_cond = self.cctx.mk_or_from_iter( // manually restrict to slice self.graph .edges_directed(n, Incoming) .filter(|e| slice.edges.contains(e.id())) .map(|e| { let src_cond = ret[&e.source()]; match (&self.graph[e.source()], e.weight()) { (&CfgNode::Condition(c), CfgEdge::True) => { self.cctx.mk_and(src_cond, self.cctx.mk_var(c)) } (&CfgNode::Condition(c), CfgEdge::False) => self .cctx .mk_and(src_cond, self.cctx.mk_not(self.cctx.mk_var(c))), (_, CfgEdge::True) => src_cond, (_, CfgEdge::False) => self.cctx.mk_false(), } }), ); let _old = ret.insert(n, reach_cond); debug_assert!(_old.is_none()); } } ret } /// Transforms the loop into a single-entry loop. /// Returns the new loop header. 
fn funnel_abnormal_entries(&mut self, header: NodeIndex, loop_nodes: &NodeSet) -> NodeIndex { let mut entry_map = HashMap::new(); for n in loop_nodes { for e in self.graph.edges_directed(n, Incoming) { if !loop_nodes.contains(e.source()) { entry_map.entry(n).or_insert(Vec::new()).push(e.id()); } } } // loop must be reachable, so the header must have entries let header_entries = entry_map.remove(&header).unwrap(); debug_assert!(!header_entries.is_empty()); let abnormal_entry_map = entry_map; if abnormal_entry_map.is_empty() { // no abnormal entries return header; } let abnormal_entry_iter = (1..).zip(&abnormal_entry_map); let struct_var = self.actx.mk_fresh_var(); // make condition cascade let new_header = { let abnormal_entry_iter = abnormal_entry_iter.clone().map(|(n, (&t, _))| (n, t)); let dummy_preheader = self.graph.add_node(CfgNode::Dummy("loop \"preheader\"")); let mut prev_cascade_node = dummy_preheader; let mut prev_entry_target = header; let mut prev_entry_num = 0; // we make the condition node for the *previous* entry target b/c // the current one might be the last one, which shouldn't get a // condition node because it's the only possible target for (entry_num, entry_target) in abnormal_entry_iter { let prev_cond_eq = self .cctx .new_var(self.actx.mk_cond_equals(&struct_var, prev_entry_num)); let cascade_node = self.graph.add_node(CfgNode::Condition(prev_cond_eq)); self.graph .add_edge(prev_cascade_node, cascade_node, CfgEdge::False); self.graph .add_edge(cascade_node, prev_entry_target, CfgEdge::True); let struct_reset = self.graph.add_node(CfgNode::Code(AstNodeC::BasicBlock( self.actx.mk_var_assign(&struct_var, 0), ))); self.graph .add_edge(struct_reset, entry_target, CfgEdge::True); prev_cascade_node = cascade_node; prev_entry_target = struct_reset; prev_entry_num = entry_num; } self.graph .add_edge(prev_cascade_node, prev_entry_target, CfgEdge::False); // we always add an edge from dummy_preheader let new_header = self.graph.neighbors(dummy_preheader).next().unwrap(); self.graph.remove_node(dummy_preheader); new_header }; // redirect entries for (entry_num, entry_edges) in iter::once((0, &header_entries)).chain(abnormal_entry_iter.map(|(n, (_, e))| (n, e))) { let struct_assign = self.graph.add_node(CfgNode::Code(AstNodeC::BasicBlock( self.actx.mk_var_assign(&struct_var, entry_num), ))); self.graph .add_edge(struct_assign, new_header, CfgEdge::True); for &entry_edge in entry_edges { graph_utils::retarget_edge(&mut self.graph, entry_edge, struct_assign); } } new_header } /// Incrementally adds nodes dominated by the loop to the loop until /// there's only one successor or there are no more nodes to add. fn refine_loop(&self, loop_nodes: &mut NodeSet, succ_nodes: &mut NodeSet) -> () { // reuse this `NodeSet` so we avoid allocating let mut new_nodes = NodeSet::new(); while succ_nodes.len() > 1 { for n in &*succ_nodes { if self .graph .neighbors_directed(n, Incoming) .all(|pred| loop_nodes.contains(pred)) { // post-pone removal from `succ_nodes` b/c rust ownership loop_nodes.insert(n); new_nodes.extend(self.graph.neighbors(n).filter(|&u| !loop_nodes.contains(u))); } } // do the removal succ_nodes.difference_with(&loop_nodes); if new_nodes.is_empty() { break; } succ_nodes.union_with(&new_nodes); new_nodes.clear(); } } /// Transforms the loop so that all loop exits are `break`. /// Returns the new loop successor. fn funnel_abnormal_exits( &mut self, loop_nodes: &mut NodeSet, final_succ: NodeIndex, abn_succ_nodes: &NodeSet, ) -> NodeIndex
{
    // replace "normal" exit edges with "break"
    {
        let exit_edges: Vec<_> =
            graph_utils::edges_from_region_to_node(&self.graph, &loop_nodes, final_succ)
                .collect();
        for exit_edge in exit_edges {
            let break_node = self.graph.add_node(CfgNode::Code(AstNodeC::Break));
            graph_utils::retarget_edge(&mut self.graph, exit_edge, break_node);
            loop_nodes.insert(break_node);
        }
    }

    if abn_succ_nodes.is_empty() {
        // no abnormal exits
        return final_succ;
    }

    let abn_succ_iter = (1..).zip(abn_succ_nodes);
    let struct_var = self.actx.mk_fresh_var_zeroed();
identifier_body
splineIK_FK.py
0],[5.000, 0.000, 0.000],[3.750, 0.000, -1.250],[3.750, 0.000, -0.624], [3.110, 0.000, -0.615],[2.928, 0.000, -1.211],[2.639, 0.000, -1.761],[2.242, 0.000, -2.240], [1.766, 0.000, -2.641],[1.211, 0.000, -2.920],[0.620, 0.000, -3.119],[0.626, 0.000, -3.750], [1.250, 0.000, -3.750],[0.000, 0.000, -5.000]] ctrlName = pm.curve(p = pos,d = 1,name = name) shape = ctrlName.getShape() shape.overrideEnabled.set(1) shape.overrideColor.set(13) return ctrlName def createSplineIKRootCtrl(grp,name = "Test"): splineRootCtrl = pm.curve(p=[(0.6, 0, 1), (1.15, 0, 0), (0.6, 0, -1),(-0.6, 0, -1),(-1.15, 0, 0), (-0.6, 0, 1),(0.6, 0, 1)],d=1,name='{0}RootCtrl'.format(name)) grp.addChild(splineRootCtrl) splineRootCtrl.t.set(0,0,0) shape = splineRootCtrl.getShape() shape.overrideEnabled.set(1) shape.overrideColor.set(6) return splineRootCtrl,shape def createcurveCube(name = 'name', num = 1,multiply = 1): curveCtrl = pm.circle(c=(0, 0, 0), nr=(1, 0, 0), sw=360, d=3, name='{0}{2}{1:03d}'.format(name, num, 'Ctrl'))[0] shape = curveCtrl.getShape() shape.overrideEnabled.set(1) shape.overrideColor.set(18) pm.scale(shape.cv[:],multiply,multiply,multiply) connectGroup = createTransform(type='transform',name = '%s_ConnectGrp'%(curveCtrl)) zeroGroup = createTransform(type='transform',name = '%s_Grp'%(curveCtrl),p = connectGroup) sclGroup = createTransform(type='transform',name = '%s_sclGrp'%(curveCtrl),p = curveCtrl) zeroGroup.addChild(curveCtrl) return connectGroup,curveCtrl,sclGroup def getPositions(joints): positions = [] for each in joints: position = each.getTranslation('world') positions.append(position) return positions def createIKSplinecurve(ctrlGrp,positions,number,name = 'Test'): if number == 2: dergee = 1 elif number == 3: dergee = 2 elif number >= 4: dergee = 3 curveName,curveShapeName = createCurve(positions,d = 3,name = '%s_solCurve'%(name)) ctrlGrp.addChild(curveName) cvList = curveShapeName.cv[:] indices = cvList.indices() pocNumber = len(indices) length,curveLength = getIncrement(curveShapeName,number) pos = [] for num in range(number): parameter = curveShapeName.findParamFromLength(length*num) position = curveShapeName.getPointAtParam(parameter,space = 'world') pos.append(position) pm.refresh() ikCtrlCrv,ikCtrlCrvShape= createCurve(pos,d = dergee,name = '%s_spineIKCtrlCurve'%(name)) ctrlGrp.addChild(ikCtrlCrv) pocLength,pocCurveLength = getIncrement(ikCtrlCrvShape,pocNumber) multiply = pocCurveLength/5.0 for num in range(number): pmaName,zeroGroup = createIKCtrl(ctrlGrp,name = name,num = num+1,multiply = multiply) #pmaName.input3D[0].input3D.set(pos[num]) zeroGroup.t.set(pos[num]) pmaName.outputTranslate.connect(ikCtrlCrvShape.controlPoints[num]) for cv in indices: parameter = ikCtrlCrvShape.findParamFromLength(pocLength*cv) pocName = createPoc(parameter,ikCtrlCrvShape,'%s_Poc%03d'%(name,cv)) pocName.position.connect(curveShapeName.controlPoints[cv]) return [curveName,curveShapeName],[ikCtrlCrv,ikCtrlCrvShape] def getIncrement(shape,number): curveLength = shape.length() increment = 1.0/(number - 1) length = increment*curveLength return length,curveLength def createCurve(positions,d = 3,name = 'Test'): curveName = pm.curve(d = d,p = positions,name = name) curveShapeName = curveName.getShape() curveName.v.set(0) curveName.inheritsTransform.set(0) return curveName,curveShapeName def createPoc(parameter,shape,name): pocName = createTransform(type = 'pointOnCurveInfo',name = name) pocName.parameter.set(parameter) shape.worldSpace[0].connect(pocName.inputCurve) return pocName def createIKCtrl(ctrlGrp,name = 
'name', num = 1,multiply = 1): pos = [[-1.0, 0.0, 1.0], [1.0, 0.0, 1.0], [1.0, 0.0, -1.0], [-1.0, 0.0, -1.0], [-1.0, 0.0, 1.0]] curveSquare = pm.curve(p=pos, d=1, name='{0}{2}{1:03d}'.format(name, num, 'Ctrl')) shapes = curveSquare.getShapes() for shape in shapes: pm.scale(shape.cv[:],multiply,multiply,multiply) shape.overrideEnabled.set(1) shape.overrideColor.set(17) zeroGroup = createTransform(type='transform',name = '%s_Grp'%(curveSquare),p=ctrlGrp) zeroGroup.addChild(curveSquare) dpmName = createTransform(type='decomposeMatrix',name='{0}{2}{1:03d}'.format(name, num, 'DPM')) curveSquare.worldMatrix[0].connect(dpmName.inputMatrix,f = 1) return dpmName,zeroGroup def snapToObject(object,transform): tr = object.getTranslation(space = 'world') ro = object.getRotation(space = 'world') transform.t.set(tr) transform.r.set(ro) def createCondition(shape,name): curveInfoName = createTransform(type = 'curveInfo',name = '%s_CI'%(name)) shape.worldSpace[0].connect(curveInfoName.inputCurve) length = shape.length() lengthMD = createTransform(type = 'multiplyDivide',name = '%s_MD'%(name)) lengthMD.operation.set(2) lengthMD.input2X.set(length) conditionMD = createTransform(type = 'multiplyDivide',name = '%s_conMD'%(name)) conditionMD.operation.set(1) lengthMD.outputX.connect(conditionMD.input2X) curveInfoName.arcLength.connect(lengthMD.input1X) setRangeName = createTransform(type = 'setRange',name = '%s_SR'%(name)) setRangeName.outValueX.connect(conditionMD.input1X) setRangeName.oldMaxX.set(10) setRangeName.maxX.set(1) return setRangeName,conditionMD def creatFkCtrlCondition(range,multiply,ikConTr):
    position = ikConTr.t.get()
    setRangeName = createTransform(type = 'setRange',name = '%s_SR'%(ikConTr))
    conditionName = createTransform(type = 'condition',name = '%s_Condition'%(ikConTr))
    range.outValueX.connect(setRangeName.valueX)
    range.outValueX.connect(setRangeName.valueY)
    range.outValueX.connect(setRangeName.valueZ)
    setRangeName.oldMax.set(1,1,1)
    setRangeName.oldMin.set(0,0,0)
    ikConTr.t.connect(setRangeName.max)
    setRangeName.min.set(position)
    conditionName.secondTerm.set(1)
    conditionName.operation.set(3)
    setRangeName.outValue.connect(conditionName.colorIfFalse)
    ikConTr.t.connect(conditionName.colorIfTrue)
    multiply.outputX.connect(conditionName.firstTerm)
    return conditionName
identifier_body
splineIK_FK.py
.630],[-1.222, 0.000, 2.928],[-0.624, 0.000, 3.106],[-0.626, 0.000, 3.750], [-1.250, 0.000, 3.750],[0.000, 0.000, 5.000],[1.250, 0.000, 3.750],[0.626, 0.000, 3.750], [0.612, 0.000, 3.108],[1.210, 0.000, 2.932],[1.754, 0.000, 2.637],[2.245, 0.000, 2.249], [2.629, 0.000, 1.764],[2.930, 0.000, 1.216],[3.106, 0.000, 0.623],[3.750, 0.000, 0.628], [3.750, 0.000, 1.250],[5.000, 0.000, 0.000],[3.750, 0.000, -1.250],[3.750, 0.000, -0.624], [3.110, 0.000, -0.615],[2.928, 0.000, -1.211],[2.639, 0.000, -1.761],[2.242, 0.000, -2.240], [1.766, 0.000, -2.641],[1.211, 0.000, -2.920],[0.620, 0.000, -3.119],[0.626, 0.000, -3.750], [1.250, 0.000, -3.750],[0.000, 0.000, -5.000]] ctrlName = pm.curve(p = pos,d = 1,name = name) shape = ctrlName.getShape() shape.overrideEnabled.set(1) shape.overrideColor.set(13) return ctrlName def createSplineIKRootCtrl(grp,name = "Test"): splineRootCtrl = pm.curve(p=[(0.6, 0, 1), (1.15, 0, 0), (0.6, 0, -1),(-0.6, 0, -1),(-1.15, 0, 0), (-0.6, 0, 1),(0.6, 0, 1)],d=1,name='{0}RootCtrl'.format(name)) grp.addChild(splineRootCtrl) splineRootCtrl.t.set(0,0,0) shape = splineRootCtrl.getShape() shape.overrideEnabled.set(1) shape.overrideColor.set(6) return splineRootCtrl,shape def createcurveCube(name = 'name', num = 1,multiply = 1): curveCtrl = pm.circle(c=(0, 0, 0), nr=(1, 0, 0), sw=360, d=3, name='{0}{2}{1:03d}'.format(name, num, 'Ctrl'))[0] shape = curveCtrl.getShape() shape.overrideEnabled.set(1) shape.overrideColor.set(18) pm.scale(shape.cv[:],multiply,multiply,multiply) connectGroup = createTransform(type='transform',name = '%s_ConnectGrp'%(curveCtrl)) zeroGroup = createTransform(type='transform',name = '%s_Grp'%(curveCtrl),p = connectGroup) sclGroup = createTransform(type='transform',name = '%s_sclGrp'%(curveCtrl),p = curveCtrl) zeroGroup.addChild(curveCtrl) return connectGroup,curveCtrl,sclGroup def getPositions(joints): positions = [] for each in joints: position = each.getTranslation('world') positions.append(position) return positions def createIKSplinecurve(ctrlGrp,positions,number,name = 'Test'): if number == 2: dergee = 1 elif number == 3: dergee = 2 elif number >= 4: dergee = 3 curveName,curveShapeName = createCurve(positions,d = 3,name = '%s_solCurve'%(name)) ctrlGrp.addChild(curveName) cvList = curveShapeName.cv[:] indices = cvList.indices() pocNumber = len(indices)
pos = [] for num in range(number): parameter = curveShapeName.findParamFromLength(length*num) position = curveShapeName.getPointAtParam(parameter,space = 'world') pos.append(position) pm.refresh() ikCtrlCrv,ikCtrlCrvShape= createCurve(pos,d = dergee,name = '%s_spineIKCtrlCurve'%(name)) ctrlGrp.addChild(ikCtrlCrv) pocLength,pocCurveLength = getIncrement(ikCtrlCrvShape,pocNumber) multiply = pocCurveLength/5.0 for num in range(number): pmaName,zeroGroup = createIKCtrl(ctrlGrp,name = name,num = num+1,multiply = multiply) #pmaName.input3D[0].input3D.set(pos[num]) zeroGroup.t.set(pos[num]) pmaName.outputTranslate.connect(ikCtrlCrvShape.controlPoints[num]) for cv in indices: parameter = ikCtrlCrvShape.findParamFromLength(pocLength*cv) pocName = createPoc(parameter,ikCtrlCrvShape,'%s_Poc%03d'%(name,cv)) pocName.position.connect(curveShapeName.controlPoints[cv]) return [curveName,curveShapeName],[ikCtrlCrv,ikCtrlCrvShape] def getIncrement(shape,number): curveLength = shape.length() increment = 1.0/(number - 1) length = increment*curveLength return length,curveLength def createCurve(positions,d = 3,name = 'Test'): curveName = pm.curve(d = d,p = positions,name = name) curveShapeName = curveName.getShape() curveName.v.set(0) curveName.inheritsTransform.set(0) return curveName,curveShapeName def createPoc(parameter,shape,name): pocName = createTransform(type = 'pointOnCurveInfo',name = name) pocName.parameter.set(parameter) shape.worldSpace[0].connect(pocName.inputCurve) return pocName def createIKCtrl(ctrlGrp,name = 'name', num = 1,multiply = 1): pos = [[-1.0, 0.0, 1.0], [1.0, 0.0, 1.0], [1.0, 0.0, -1.0], [-1.0, 0.0, -1.0], [-1.0, 0.0, 1.0]] curveSquare = pm.curve(p=pos, d=1, name='{0}{2}{1:03d}'.format(name, num, 'Ctrl')) shapes = curveSquare.getShapes() for shape in shapes: pm.scale(shape.cv[:],multiply,multiply,multiply) shape.overrideEnabled.set(1) shape.overrideColor.set(17) zeroGroup = createTransform(type='transform',name = '%s_Grp'%(curveSquare),p=ctrlGrp) zeroGroup.addChild(curveSquare) dpmName = createTransform(type='decomposeMatrix',name='{0}{2}{1:03d}'.format(name, num, 'DPM')) curveSquare.worldMatrix[0].connect(dpmName.inputMatrix,f = 1) return dpmName,zeroGroup def snapToObject(object,transform): tr = object.getTranslation(space = 'world') ro = object.getRotation(space = 'world') transform.t.set(tr) transform.r.set(ro) def createCondition(shape,name): curveInfoName = createTransform(type = 'curveInfo',name = '%s_CI'%(name)) shape.worldSpace[0].connect(curveInfoName.inputCurve) length = shape.length() lengthMD = createTransform(type = 'multiplyDivide',name = '%s_MD'%(name)) lengthMD.operation.set(2) lengthMD.input2X.set(length) conditionMD
length,curveLength = getIncrement(curveShapeName,number)
random_line_split
splineIK_FK.py
.630],[-1.222, 0.000, 2.928],[-0.624, 0.000, 3.106],[-0.626, 0.000, 3.750], [-1.250, 0.000, 3.750],[0.000, 0.000, 5.000],[1.250, 0.000, 3.750],[0.626, 0.000, 3.750], [0.612, 0.000, 3.108],[1.210, 0.000, 2.932],[1.754, 0.000, 2.637],[2.245, 0.000, 2.249], [2.629, 0.000, 1.764],[2.930, 0.000, 1.216],[3.106, 0.000, 0.623],[3.750, 0.000, 0.628], [3.750, 0.000, 1.250],[5.000, 0.000, 0.000],[3.750, 0.000, -1.250],[3.750, 0.000, -0.624], [3.110, 0.000, -0.615],[2.928, 0.000, -1.211],[2.639, 0.000, -1.761],[2.242, 0.000, -2.240], [1.766, 0.000, -2.641],[1.211, 0.000, -2.920],[0.620, 0.000, -3.119],[0.626, 0.000, -3.750], [1.250, 0.000, -3.750],[0.000, 0.000, -5.000]] ctrlName = pm.curve(p = pos,d = 1,name = name) shape = ctrlName.getShape() shape.overrideEnabled.set(1) shape.overrideColor.set(13) return ctrlName def createSplineIKRootCtrl(grp,name = "Test"): splineRootCtrl = pm.curve(p=[(0.6, 0, 1), (1.15, 0, 0), (0.6, 0, -1),(-0.6, 0, -1),(-1.15, 0, 0), (-0.6, 0, 1),(0.6, 0, 1)],d=1,name='{0}RootCtrl'.format(name)) grp.addChild(splineRootCtrl) splineRootCtrl.t.set(0,0,0) shape = splineRootCtrl.getShape() shape.overrideEnabled.set(1) shape.overrideColor.set(6) return splineRootCtrl,shape def createcurveCube(name = 'name', num = 1,multiply = 1): curveCtrl = pm.circle(c=(0, 0, 0), nr=(1, 0, 0), sw=360, d=3, name='{0}{2}{1:03d}'.format(name, num, 'Ctrl'))[0] shape = curveCtrl.getShape() shape.overrideEnabled.set(1) shape.overrideColor.set(18) pm.scale(shape.cv[:],multiply,multiply,multiply) connectGroup = createTransform(type='transform',name = '%s_ConnectGrp'%(curveCtrl)) zeroGroup = createTransform(type='transform',name = '%s_Grp'%(curveCtrl),p = connectGroup) sclGroup = createTransform(type='transform',name = '%s_sclGrp'%(curveCtrl),p = curveCtrl) zeroGroup.addChild(curveCtrl) return connectGroup,curveCtrl,sclGroup def getPositions(joints): positions = [] for each in joints: position = each.getTranslation('world') positions.append(position) return positions def createIKSplinecurve(ctrlGrp,positions,number,name = 'Test'): if number == 2: dergee = 1 elif number == 3:
elif number >= 4: dergee = 3 curveName,curveShapeName = createCurve(positions,d = 3,name = '%s_solCurve'%(name)) ctrlGrp.addChild(curveName) cvList = curveShapeName.cv[:] indices = cvList.indices() pocNumber = len(indices) length,curveLength = getIncrement(curveShapeName,number) pos = [] for num in range(number): parameter = curveShapeName.findParamFromLength(length*num) position = curveShapeName.getPointAtParam(parameter,space = 'world') pos.append(position) pm.refresh() ikCtrlCrv,ikCtrlCrvShape= createCurve(pos,d = dergee,name = '%s_spineIKCtrlCurve'%(name)) ctrlGrp.addChild(ikCtrlCrv) pocLength,pocCurveLength = getIncrement(ikCtrlCrvShape,pocNumber) multiply = pocCurveLength/5.0 for num in range(number): pmaName,zeroGroup = createIKCtrl(ctrlGrp,name = name,num = num+1,multiply = multiply) #pmaName.input3D[0].input3D.set(pos[num]) zeroGroup.t.set(pos[num]) pmaName.outputTranslate.connect(ikCtrlCrvShape.controlPoints[num]) for cv in indices: parameter = ikCtrlCrvShape.findParamFromLength(pocLength*cv) pocName = createPoc(parameter,ikCtrlCrvShape,'%s_Poc%03d'%(name,cv)) pocName.position.connect(curveShapeName.controlPoints[cv]) return [curveName,curveShapeName],[ikCtrlCrv,ikCtrlCrvShape] def getIncrement(shape,number): curveLength = shape.length() increment = 1.0/(number - 1) length = increment*curveLength return length,curveLength def createCurve(positions,d = 3,name = 'Test'): curveName = pm.curve(d = d,p = positions,name = name) curveShapeName = curveName.getShape() curveName.v.set(0) curveName.inheritsTransform.set(0) return curveName,curveShapeName def createPoc(parameter,shape,name): pocName = createTransform(type = 'pointOnCurveInfo',name = name) pocName.parameter.set(parameter) shape.worldSpace[0].connect(pocName.inputCurve) return pocName def createIKCtrl(ctrlGrp,name = 'name', num = 1,multiply = 1): pos = [[-1.0, 0.0, 1.0], [1.0, 0.0, 1.0], [1.0, 0.0, -1.0], [-1.0, 0.0, -1.0], [-1.0, 0.0, 1.0]] curveSquare = pm.curve(p=pos, d=1, name='{0}{2}{1:03d}'.format(name, num, 'Ctrl')) shapes = curveSquare.getShapes() for shape in shapes: pm.scale(shape.cv[:],multiply,multiply,multiply) shape.overrideEnabled.set(1) shape.overrideColor.set(17) zeroGroup = createTransform(type='transform',name = '%s_Grp'%(curveSquare),p=ctrlGrp) zeroGroup.addChild(curveSquare) dpmName = createTransform(type='decomposeMatrix',name='{0}{2}{1:03d}'.format(name, num, 'DPM')) curveSquare.worldMatrix[0].connect(dpmName.inputMatrix,f = 1) return dpmName,zeroGroup def snapToObject(object,transform): tr = object.getTranslation(space = 'world') ro = object.getRotation(space = 'world') transform.t.set(tr) transform.r.set(ro) def createCondition(shape,name): curveInfoName = createTransform(type = 'curveInfo',name = '%s_CI'%(name)) shape.worldSpace[0].connect(curveInfoName.inputCurve) length = shape.length() lengthMD = createTransform(type = 'multiplyDivide',name = '%s_MD'%(name)) lengthMD.operation.set(2) lengthMD.input2X.set(length) conditionMD
dergee = 2
conditional_block
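The span masked in this splineIK_FK.py record is the branch that picks a NURBS curve degree from the number of controls: linear for two points, quadratic for three, cubic for four or more. A minimal, Maya-independent sketch of that mapping follows; pick_degree is an illustrative name, not a function from the source file.

# Illustrative sketch (not from splineIK_FK.py): map a control count to a curve
# degree, mirroring the conditional block masked in the record above.
def pick_degree(num_points: int) -> int:
    if num_points < 2:
        raise ValueError("a curve needs at least two points")
    if num_points == 2:
        return 1   # linear
    if num_points == 3:
        return 2   # quadratic -- the value the masked middle assigns
    return 3       # cubic for four or more points

assert pick_degree(3) == 2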
splineIK_FK.py
.630],[-1.222, 0.000, 2.928],[-0.624, 0.000, 3.106],[-0.626, 0.000, 3.750], [-1.250, 0.000, 3.750],[0.000, 0.000, 5.000],[1.250, 0.000, 3.750],[0.626, 0.000, 3.750], [0.612, 0.000, 3.108],[1.210, 0.000, 2.932],[1.754, 0.000, 2.637],[2.245, 0.000, 2.249], [2.629, 0.000, 1.764],[2.930, 0.000, 1.216],[3.106, 0.000, 0.623],[3.750, 0.000, 0.628], [3.750, 0.000, 1.250],[5.000, 0.000, 0.000],[3.750, 0.000, -1.250],[3.750, 0.000, -0.624], [3.110, 0.000, -0.615],[2.928, 0.000, -1.211],[2.639, 0.000, -1.761],[2.242, 0.000, -2.240], [1.766, 0.000, -2.641],[1.211, 0.000, -2.920],[0.620, 0.000, -3.119],[0.626, 0.000, -3.750], [1.250, 0.000, -3.750],[0.000, 0.000, -5.000]] ctrlName = pm.curve(p = pos,d = 1,name = name) shape = ctrlName.getShape() shape.overrideEnabled.set(1) shape.overrideColor.set(13) return ctrlName def createSplineIKRootCtrl(grp,name = "Test"): splineRootCtrl = pm.curve(p=[(0.6, 0, 1), (1.15, 0, 0), (0.6, 0, -1),(-0.6, 0, -1),(-1.15, 0, 0), (-0.6, 0, 1),(0.6, 0, 1)],d=1,name='{0}RootCtrl'.format(name)) grp.addChild(splineRootCtrl) splineRootCtrl.t.set(0,0,0) shape = splineRootCtrl.getShape() shape.overrideEnabled.set(1) shape.overrideColor.set(6) return splineRootCtrl,shape def
(name = 'name', num = 1,multiply = 1): curveCtrl = pm.circle(c=(0, 0, 0), nr=(1, 0, 0), sw=360, d=3, name='{0}{2}{1:03d}'.format(name, num, 'Ctrl'))[0] shape = curveCtrl.getShape() shape.overrideEnabled.set(1) shape.overrideColor.set(18) pm.scale(shape.cv[:],multiply,multiply,multiply) connectGroup = createTransform(type='transform',name = '%s_ConnectGrp'%(curveCtrl)) zeroGroup = createTransform(type='transform',name = '%s_Grp'%(curveCtrl),p = connectGroup) sclGroup = createTransform(type='transform',name = '%s_sclGrp'%(curveCtrl),p = curveCtrl) zeroGroup.addChild(curveCtrl) return connectGroup,curveCtrl,sclGroup def getPositions(joints): positions = [] for each in joints: position = each.getTranslation('world') positions.append(position) return positions def createIKSplinecurve(ctrlGrp,positions,number,name = 'Test'): if number == 2: dergee = 1 elif number == 3: dergee = 2 elif number >= 4: dergee = 3 curveName,curveShapeName = createCurve(positions,d = 3,name = '%s_solCurve'%(name)) ctrlGrp.addChild(curveName) cvList = curveShapeName.cv[:] indices = cvList.indices() pocNumber = len(indices) length,curveLength = getIncrement(curveShapeName,number) pos = [] for num in range(number): parameter = curveShapeName.findParamFromLength(length*num) position = curveShapeName.getPointAtParam(parameter,space = 'world') pos.append(position) pm.refresh() ikCtrlCrv,ikCtrlCrvShape= createCurve(pos,d = dergee,name = '%s_spineIKCtrlCurve'%(name)) ctrlGrp.addChild(ikCtrlCrv) pocLength,pocCurveLength = getIncrement(ikCtrlCrvShape,pocNumber) multiply = pocCurveLength/5.0 for num in range(number): pmaName,zeroGroup = createIKCtrl(ctrlGrp,name = name,num = num+1,multiply = multiply) #pmaName.input3D[0].input3D.set(pos[num]) zeroGroup.t.set(pos[num]) pmaName.outputTranslate.connect(ikCtrlCrvShape.controlPoints[num]) for cv in indices: parameter = ikCtrlCrvShape.findParamFromLength(pocLength*cv) pocName = createPoc(parameter,ikCtrlCrvShape,'%s_Poc%03d'%(name,cv)) pocName.position.connect(curveShapeName.controlPoints[cv]) return [curveName,curveShapeName],[ikCtrlCrv,ikCtrlCrvShape] def getIncrement(shape,number): curveLength = shape.length() increment = 1.0/(number - 1) length = increment*curveLength return length,curveLength def createCurve(positions,d = 3,name = 'Test'): curveName = pm.curve(d = d,p = positions,name = name) curveShapeName = curveName.getShape() curveName.v.set(0) curveName.inheritsTransform.set(0) return curveName,curveShapeName def createPoc(parameter,shape,name): pocName = createTransform(type = 'pointOnCurveInfo',name = name) pocName.parameter.set(parameter) shape.worldSpace[0].connect(pocName.inputCurve) return pocName def createIKCtrl(ctrlGrp,name = 'name', num = 1,multiply = 1): pos = [[-1.0, 0.0, 1.0], [1.0, 0.0, 1.0], [1.0, 0.0, -1.0], [-1.0, 0.0, -1.0], [-1.0, 0.0, 1.0]] curveSquare = pm.curve(p=pos, d=1, name='{0}{2}{1:03d}'.format(name, num, 'Ctrl')) shapes = curveSquare.getShapes() for shape in shapes: pm.scale(shape.cv[:],multiply,multiply,multiply) shape.overrideEnabled.set(1) shape.overrideColor.set(17) zeroGroup = createTransform(type='transform',name = '%s_Grp'%(curveSquare),p=ctrlGrp) zeroGroup.addChild(curveSquare) dpmName = createTransform(type='decomposeMatrix',name='{0}{2}{1:03d}'.format(name, num, 'DPM')) curveSquare.worldMatrix[0].connect(dpmName.inputMatrix,f = 1) return dpmName,zeroGroup def snapToObject(object,transform): tr = object.getTranslation(space = 'world') ro = object.getRotation(space = 'world') transform.t.set(tr) transform.r.set(ro) def 
createCondition(shape,name): curveInfoName = createTransform(type = 'curveInfo',name = '%s_CI'%(name)) shape.worldSpace[0].connect(curveInfoName.inputCurve) length = shape.length() lengthMD = createTransform(type = 'multiplyDivide',name = '%s_MD'%(name)) lengthMD.operation.set(2) lengthMD.input2X.set(length) condition
createcurveCube
identifier_name
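Both splineIK_FK.py records lean on getIncrement, which splits a curve's total arc length into equal steps so controls and pointOnCurveInfo nodes can be spaced evenly before findParamFromLength converts each length back to a parameter. Below is a plain-Python sketch of that spacing arithmetic with no Maya calls; sample_even_lengths is an illustrative name, not part of the source.

# Illustrative sketch of the even-spacing arithmetic behind getIncrement():
# step = total_length / (count - 1), samples at 0, step, 2*step, ...
def sample_even_lengths(total_length: float, count: int) -> list:
    if count < 2:
        raise ValueError("need at least two samples")
    step = total_length / (count - 1)
    return [step * i for i in range(count)]

print(sample_even_lengths(10.0, 5))  # [0.0, 2.5, 5.0, 7.5, 10.0]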
mainAdmin.js
room this.duration_end = "-"; //end time duration this.mark = false; //mark variable used to check if a closed room has already been printed to section (avoid repeats) this.booker = "-"; } //Declaring each new room var room020 = new Room("020", "Basement", 6); var room120 = new Room("120", "First", 4); var room214 = new Room("214", "Second", 6); var room216 = new Room("216", "Second", 5); var room220 = new Room("220", "Second", 6); var room222 = new Room("222", "Second", 4); var room224 = new Room("224", "Second", 8); var room226 = new Room("226", "Second", 4); var room230 = new Room("230", "Second", 4); var room301 = new Room("301", "Third", 4); var room302 = new Room("302", "Third", 6); var room306 = new Room("306", "Third", 5); var room308 = new Room("308", "Third", 4); var room310 = new Room("310", "Third", 4); var room312 = new Room("312", "Third", 3); var room314 = new Room("314", "Third", 4); //Array to store all classes of all rooms let rooms = [room020, room120, room214, room216, room220, room222, room224, room226, room230, room301, room302, room306, room308, room310, room312, room314]; //array to store closed rooms, initialze to have nothing, will be used to store rooms that have been booked let closed_rooms = []; //Function to render all rooms to screen with appropriate information //user for loop to print out each room class with all information function loadRooms() { for(var i = 0; i < rooms.length; i++) { //if state checking if rooms are open to print availability in green if(rooms[i].open == "Open") { info = document.getElementsByClassName('a_room'); //get each <p> in html file to be able to print info[i].innerHTML = "Room: " + rooms[i].number + " &emsp;Capacity: " + rooms[i].capacity + " &emsp;Availability: " + rooms[i].open.fontcolor("green").bold() + " &emsp;Reservation Starts: " + rooms[i].duration_start + " &emsp;Reservation Ends: " + rooms[i].duration_end + " &emsp;Break time remaining: " +rooms[i].breakTime + "mins" + "&emsp;" + " &emsp;Booker: " + rooms[i].booker; } //prints availability in red if closed else{ info = document.getElementsByClassName('a_room'); //get each <p> in html file to be able to print info[i].innerHTML = "Room: " + rooms[i].number + " &emsp;Capacity: " + rooms[i].capacity + " &emsp;Availability: " + rooms[i].open.fontcolor("red").bold() + " &emsp;Reservation Starts: " + rooms[i].duration_start + " &emsp;Reservation Ends: " + rooms[i].duration_end + " &emsp;Break time remaining: " +rooms[i].breakTime + "mins" + "&emsp;" + " &emsp;Booker: " + rooms[i].booker; } } } //Call loadRooms() function to render content to page loadRooms(); //Function to add closed rooms to "closed rooms" section at bottom function
() { for(var i = 0; i < closed_rooms.length; i++) { //loop through closed_rooms array to find closed rooms to print if(closed_rooms[i].mark == false) { para = document.createElement("P"); //create new paragraph element //update text content para.innerHTML = "Room: " + closed_rooms[i].number + " &emsp;Capacity: " + closed_rooms[i].capacity + " &emsp;Availability: " + closed_rooms[i].open.fontcolor("red").bold() + " &emsp;Reservation Starts: " + closed_rooms[i].duration_start + " &emsp;Reservation Ends: " + closed_rooms[i].duration_end + " &emsp;Break time remaining: " + closed_rooms[i].breakTime + "mins" + "&emsp;" + " &emsp;Booker: " + rooms[i].booker; document.getElementById("closed-rooms").appendChild(para); //append paragraph text to closed-rooms section closed_rooms[i].mark = true; //mark closed room as true so it is not printed out multiple times when loop runs again } } } //Book room function //Prompts user to specify room to book //Prompts user for start and end time of reservation //Loops through rooms to find room to book, updates room info accordingly function book_function() { //password protection function, credit to http://www.javascriptkit.com/script/cut10.shtml var testV = 1; var pass1 = prompt('Please Enter Your Password','password'); while (testV < 3) { //while loop to only allow 3 login attempts if (pass1.toLowerCase() == "letmein") { //if password is entered correctly, run button functionality create_rn = prompt("Which room would you like to book?"); tdirs = prompt("Enter reservation start time:"); tdire = prompt("Enter reservation end time:"); person = prompt("Who is booking the room?"); for (var i = 0; i < rooms.length; i++) { if(create_rn == rooms[i].number){ rooms[i].open = "Closed"; rooms[i].duration_start = tdirs; rooms[i].duration_end = tdire; rooms[i].booker = person; closed_rooms.push(rooms[i]); break; } } loadRooms(); //Call loadRooms again to render updated info to page update_listing(); break; //break from password while loop } else { //if password is incorrect, prompt user for next attempt testV+=1; var pass1 = prompt('Access Denied - Password Incorrect, Please Try Again.','Password'); } } //if all attempts are incorrect, return to previous page if (pass1.toLowerCase()!="password" & testV ==3) history.go(-1); return " "; } //Break time function //Prompts user to specify room to add break time for //Prompts user for length of break time //Loops through rooms to find room, updates room info accordingly function break_function() { var testV = 1; var pass1 = prompt('Please Enter Your Password','password'); while (testV < 3) { if (pass1.toLowerCase() == "letmein") { break_rn = prompt("Which room would you like to add break time for?"); tbr = prompt("How long is the break time?"); for (var i = 0; i < rooms.length; i++) { if(break_rn == rooms[i].number){ rooms[i].breakTime = tbr; break; } } loadRooms(); break; } else { testV+=1; var pass1 = prompt('Access Denied - Password Incorrect, Please Try Again.','Password'); } } //end while loop if (pass1.toLowerCase()!="password" & testV ==3) history.go(-1); return " "; } //Cancel room reservation function //Prompts user to specify room to cancel //Loops through rooms to find room, updates room info accordingly function cancel_function() { var testV = 1; var pass1 = prompt('Please Enter Your Password','password'); while (testV < 3) { if (pass1.toLowerCase() == "letmein") { cancel_rn = prompt("Which room would you like to cancel reservation?"); for (var i = 0; i < rooms.length; i++) { if(cancel_rn == rooms[i].number){ 
rooms[i].duration_start = "-"; rooms[i].duration_end = "-"; rooms[i].open = "Open"; rooms[i].breakTime = 0; break; } } loadRooms(); break; } else { testV+=1; var pass1 = prompt('Access Denied - Password Incorrect, Please Try Again.','Password'); } } if (pass1.toLowerCase()!="password" & testV ==3) history.go(-1); return " "; } //Old code saved for backup// //
update_listing
identifier_name
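The identifier masked in this mainAdmin.js record is update_listing, which appends each newly closed room to the page exactly once by flipping a per-room mark flag (note the source reads rooms[i].booker rather than closed_rooms[i].booker while looping over closed_rooms, which looks like an index slip). A hedged, language-neutral sketch of the same print-once pattern; the Room model is trimmed to the fields the pattern needs and the names are illustrative.

# Illustrative sketch of the update_listing() pattern: emit each closed room once,
# using a per-item "mark" flag so repeat calls do not duplicate entries.
from dataclasses import dataclass

@dataclass
class Room:                      # trimmed model; the source tracks more fields
    number: str
    open: str = "Open"
    mark: bool = False

def update_listing(closed_rooms, out):
    for room in closed_rooms:
        if not room.mark:
            out.append("Room: %s  Availability: %s" % (room.number, room.open))
            room.mark = True     # never printed again on later calls

listing = []
closed = [Room("214", "Closed")]
update_listing(closed, listing)
update_listing(closed, listing)  # second call adds nothing
assert len(listing) == 1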
mainAdmin.js
room this.duration_end = "-"; //end time duration this.mark = false; //mark variable used to check if a closed room has already been printed to section (avoid repeats) this.booker = "-"; } //Declaring each new room var room020 = new Room("020", "Basement", 6); var room120 = new Room("120", "First", 4); var room214 = new Room("214", "Second", 6); var room216 = new Room("216", "Second", 5); var room220 = new Room("220", "Second", 6); var room222 = new Room("222", "Second", 4); var room224 = new Room("224", "Second", 8); var room226 = new Room("226", "Second", 4); var room230 = new Room("230", "Second", 4); var room301 = new Room("301", "Third", 4); var room302 = new Room("302", "Third", 6); var room306 = new Room("306", "Third", 5); var room308 = new Room("308", "Third", 4); var room310 = new Room("310", "Third", 4); var room312 = new Room("312", "Third", 3); var room314 = new Room("314", "Third", 4); //Array to store all classes of all rooms let rooms = [room020, room120, room214, room216, room220, room222, room224, room226, room230, room301, room302, room306, room308, room310, room312, room314]; //array to store closed rooms, initialze to have nothing, will be used to store rooms that have been booked let closed_rooms = []; //Function to render all rooms to screen with appropriate information //user for loop to print out each room class with all information function loadRooms()
} //Call loadRooms() function to render content to page loadRooms(); //Function to add closed rooms to "closed rooms" section at bottom function update_listing() { for(var i = 0; i < closed_rooms.length; i++) { //loop through closed_rooms array to find closed rooms to print if(closed_rooms[i].mark == false) { para = document.createElement("P"); //create new paragraph element //update text content para.innerHTML = "Room: " + closed_rooms[i].number + " &emsp;Capacity: " + closed_rooms[i].capacity + " &emsp;Availability: " + closed_rooms[i].open.fontcolor("red").bold() + " &emsp;Reservation Starts: " + closed_rooms[i].duration_start + " &emsp;Reservation Ends: " + closed_rooms[i].duration_end + " &emsp;Break time remaining: " + closed_rooms[i].breakTime + "mins" + "&emsp;" + " &emsp;Booker: " + rooms[i].booker; document.getElementById("closed-rooms").appendChild(para); //append paragraph text to closed-rooms section closed_rooms[i].mark = true; //mark closed room as true so it is not printed out multiple times when loop runs again } } } //Book room function //Prompts user to specify room to book //Prompts user for start and end time of reservation //Loops through rooms to find room to book, updates room info accordingly function book_function() { //password protection function, credit to http://www.javascriptkit.com/script/cut10.shtml var testV = 1; var pass1 = prompt('Please Enter Your Password','password'); while (testV < 3) { //while loop to only allow 3 login attempts if (pass1.toLowerCase() == "letmein") { //if password is entered correctly, run button functionality create_rn = prompt("Which room would you like to book?"); tdirs = prompt("Enter reservation start time:"); tdire = prompt("Enter reservation end time:"); person = prompt("Who is booking the room?"); for (var i = 0; i < rooms.length; i++) { if(create_rn == rooms[i].number){ rooms[i].open = "Closed"; rooms[i].duration_start = tdirs; rooms[i].duration_end = tdire; rooms[i].booker = person; closed_rooms.push(rooms[i]); break; } } loadRooms(); //Call loadRooms again to render updated info to page update_listing(); break; //break from password while loop } else { //if password is incorrect, prompt user for next attempt testV+=1; var pass1 = prompt('Access Denied - Password Incorrect, Please Try Again.','Password'); } } //if all attempts are incorrect, return to previous page if (pass1.toLowerCase()!="password" & testV ==3) history.go(-1); return " "; } //Break time function //Prompts user to specify room to add break time for //Prompts user for length of break time //Loops through rooms to find room, updates room info accordingly function break_function() { var testV = 1; var pass1 = prompt('Please Enter Your Password','password'); while (testV < 3) { if (pass1.toLowerCase() == "letmein") { break_rn = prompt("Which room would you like to add break time for?"); tbr = prompt("How long is the break time?"); for (var i = 0; i < rooms.length; i++) { if(break_rn == rooms[i].number){ rooms[i].breakTime = tbr; break; } } loadRooms(); break; } else { testV+=1; var pass1 = prompt('Access Denied - Password Incorrect, Please Try Again.','Password'); } } //end while loop if (pass1.toLowerCase()!="password" & testV ==3) history.go(-1); return " "; } //Cancel room reservation function //Prompts user to specify room to cancel //Loops through rooms to find room, updates room info accordingly function cancel_function() { var testV = 1; var pass1 = prompt('Please Enter Your Password','password'); while (testV < 3) { if (pass1.toLowerCase() == 
"letmein") { cancel_rn = prompt("Which room would you like to cancel reservation?"); for (var i = 0; i < rooms.length; i++) { if(cancel_rn == rooms[i].number){ rooms[i].duration_start = "-"; rooms[i].duration_end = "-"; rooms[i].open = "Open"; rooms[i].breakTime = 0; break; } } loadRooms(); break; } else { testV+=1; var pass1 = prompt('Access Denied - Password Incorrect, Please Try Again.','Password'); } } if (pass1.toLowerCase()!="password" & testV ==3) history.go(-1); return " "; } //Old code saved for backup// //
{ for(var i = 0; i < rooms.length; i++) { //if state checking if rooms are open to print availability in green if(rooms[i].open == "Open") { info = document.getElementsByClassName('a_room'); //get each <p> in html file to be able to print info[i].innerHTML = "Room: " + rooms[i].number + " &emsp;Capacity: " + rooms[i].capacity + " &emsp;Availability: " + rooms[i].open.fontcolor("green").bold() + " &emsp;Reservation Starts: " + rooms[i].duration_start + " &emsp;Reservation Ends: " + rooms[i].duration_end + " &emsp;Break time remaining: " +rooms[i].breakTime + "mins" + "&emsp;" + " &emsp;Booker: " + rooms[i].booker; } //prints availability in red if closed else{ info = document.getElementsByClassName('a_room'); //get each <p> in html file to be able to print info[i].innerHTML = "Room: " + rooms[i].number + " &emsp;Capacity: " + rooms[i].capacity + " &emsp;Availability: " + rooms[i].open.fontcolor("red").bold() + " &emsp;Reservation Starts: " + rooms[i].duration_start + " &emsp;Reservation Ends: " + rooms[i].duration_end + " &emsp;Break time remaining: " +rooms[i].breakTime + "mins" + "&emsp;" + " &emsp;Booker: " + rooms[i].booker; } }
identifier_body
mainAdmin.js
room this.duration_end = "-"; //end time duration this.mark = false; //mark variable used to check if a closed room has already been printed to section (avoid repeats) this.booker = "-"; } //Declaring each new room var room020 = new Room("020", "Basement", 6);
var room120 = new Room("120", "First", 4); var room214 = new Room("214", "Second", 6); var room216 = new Room("216", "Second", 5); var room220 = new Room("220", "Second", 6); var room222 = new Room("222", "Second", 4); var room224 = new Room("224", "Second", 8); var room226 = new Room("226", "Second", 4); var room230 = new Room("230", "Second", 4); var room301 = new Room("301", "Third", 4); var room302 = new Room("302", "Third", 6); var room306 = new Room("306", "Third", 5); var room308 = new Room("308", "Third", 4); var room310 = new Room("310", "Third", 4); var room312 = new Room("312", "Third", 3); var room314 = new Room("314", "Third", 4); //Array to store all classes of all rooms let rooms = [room020, room120, room214, room216, room220, room222, room224, room226, room230, room301, room302, room306, room308, room310, room312, room314]; //array to store closed rooms, initialze to have nothing, will be used to store rooms that have been booked let closed_rooms = []; //Function to render all rooms to screen with appropriate information //user for loop to print out each room class with all information function loadRooms() { for(var i = 0; i < rooms.length; i++) { //if state checking if rooms are open to print availability in green if(rooms[i].open == "Open") { info = document.getElementsByClassName('a_room'); //get each <p> in html file to be able to print info[i].innerHTML = "Room: " + rooms[i].number + " &emsp;Capacity: " + rooms[i].capacity + " &emsp;Availability: " + rooms[i].open.fontcolor("green").bold() + " &emsp;Reservation Starts: " + rooms[i].duration_start + " &emsp;Reservation Ends: " + rooms[i].duration_end + " &emsp;Break time remaining: " +rooms[i].breakTime + "mins" + "&emsp;" + " &emsp;Booker: " + rooms[i].booker; } //prints availability in red if closed else{ info = document.getElementsByClassName('a_room'); //get each <p> in html file to be able to print info[i].innerHTML = "Room: " + rooms[i].number + " &emsp;Capacity: " + rooms[i].capacity + " &emsp;Availability: " + rooms[i].open.fontcolor("red").bold() + " &emsp;Reservation Starts: " + rooms[i].duration_start + " &emsp;Reservation Ends: " + rooms[i].duration_end + " &emsp;Break time remaining: " +rooms[i].breakTime + "mins" + "&emsp;" + " &emsp;Booker: " + rooms[i].booker; } } } //Call loadRooms() function to render content to page loadRooms(); //Function to add closed rooms to "closed rooms" section at bottom function update_listing() { for(var i = 0; i < closed_rooms.length; i++) { //loop through closed_rooms array to find closed rooms to print if(closed_rooms[i].mark == false) { para = document.createElement("P"); //create new paragraph element //update text content para.innerHTML = "Room: " + closed_rooms[i].number + " &emsp;Capacity: " + closed_rooms[i].capacity + " &emsp;Availability: " + closed_rooms[i].open.fontcolor("red").bold() + " &emsp;Reservation Starts: " + closed_rooms[i].duration_start + " &emsp;Reservation Ends: " + closed_rooms[i].duration_end + " &emsp;Break time remaining: " + closed_rooms[i].breakTime + "mins" + "&emsp;" + " &emsp;Booker: " + rooms[i].booker; document.getElementById("closed-rooms").appendChild(para); //append paragraph text to closed-rooms section closed_rooms[i].mark = true; //mark closed room as true so it is not printed out multiple times when loop runs again } } } //Book room function //Prompts user to specify room to book //Prompts user for start and end time of reservation //Loops through rooms to find room to book, updates room info accordingly function book_function() { 
//password protection function, credit to http://www.javascriptkit.com/script/cut10.shtml var testV = 1; var pass1 = prompt('Please Enter Your Password','password'); while (testV < 3) { //while loop to only allow 3 login attempts if (pass1.toLowerCase() == "letmein") { //if password is entered correctly, run button functionality create_rn = prompt("Which room would you like to book?"); tdirs = prompt("Enter reservation start time:"); tdire = prompt("Enter reservation end time:"); person = prompt("Who is booking the room?"); for (var i = 0; i < rooms.length; i++) { if(create_rn == rooms[i].number){ rooms[i].open = "Closed"; rooms[i].duration_start = tdirs; rooms[i].duration_end = tdire; rooms[i].booker = person; closed_rooms.push(rooms[i]); break; } } loadRooms(); //Call loadRooms again to render updated info to page update_listing(); break; //break from password while loop } else { //if password is incorrect, prompt user for next attempt testV+=1; var pass1 = prompt('Access Denied - Password Incorrect, Please Try Again.','Password'); } } //if all attempts are incorrect, return to previous page if (pass1.toLowerCase()!="password" & testV ==3) history.go(-1); return " "; } //Break time function //Prompts user to specify room to add break time for //Prompts user for length of break time //Loops through rooms to find room, updates room info accordingly function break_function() { var testV = 1; var pass1 = prompt('Please Enter Your Password','password'); while (testV < 3) { if (pass1.toLowerCase() == "letmein") { break_rn = prompt("Which room would you like to add break time for?"); tbr = prompt("How long is the break time?"); for (var i = 0; i < rooms.length; i++) { if(break_rn == rooms[i].number){ rooms[i].breakTime = tbr; break; } } loadRooms(); break; } else { testV+=1; var pass1 = prompt('Access Denied - Password Incorrect, Please Try Again.','Password'); } } //end while loop if (pass1.toLowerCase()!="password" & testV ==3) history.go(-1); return " "; } //Cancel room reservation function //Prompts user to specify room to cancel //Loops through rooms to find room, updates room info accordingly function cancel_function() { var testV = 1; var pass1 = prompt('Please Enter Your Password','password'); while (testV < 3) { if (pass1.toLowerCase() == "letmein") { cancel_rn = prompt("Which room would you like to cancel reservation?"); for (var i = 0; i < rooms.length; i++) { if(cancel_rn == rooms[i].number){ rooms[i].duration_start = "-"; rooms[i].duration_end = "-"; rooms[i].open = "Open"; rooms[i].breakTime = 0; break; } } loadRooms(); break; } else { testV+=1; var pass1 = prompt('Access Denied - Password Incorrect, Please Try Again.','Password'); } } if (pass1.toLowerCase()!="password" & testV ==3) history.go(-1); return " "; } //Old code saved for backup// // //
random_line_split
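book_function, break_function and cancel_function in these mainAdmin.js records all wrap their work in the same three-attempt password gate (the snippet credited to javascriptkit.com, with "letmein" as the accepted password). A hedged sketch of that gate as a reusable helper; require_password and the injected ask callback are illustrative, not part of the source.

# Illustrative sketch of the three-attempt password gate shared by the admin actions.
def require_password(ask, secret="letmein", attempts=3):
    for n in range(attempts):
        prompt = ("Please Enter Your Password" if n == 0
                  else "Access Denied - Password Incorrect, Please Try Again.")
        if ask(prompt).lower() == secret:
            return True
    return False          # caller can then navigate away, as history.go(-1) does

answers = iter(["wrong", "letmein"])
assert require_password(lambda _msg: next(answers)) is True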
more-fr.js
Accéder au lien : " lang.linktext="Lien" lang.nofollow="Nofollow" lang.accesskey="Clé d\x27accès" lang.tabindex="Index de tabulation" lang.other="Autre" lang.cssclass="Classe" lang.internallink="Pages du site" lang.browse="Parcourir" lang.existinganchor="Sélectionner une ancre nommée de la page courante" lang.attr_target="Cible" lang.attr_targetblank="Nouvelle fenêtre" lang.attr_targetparent="Fenêtre Parent" lang.attr_targetself="Cette fenêtre" lang.attr_targettop="Fenêtre au premier plan" lang.deleteselected="Supprimer l\x27élément sélectionné" lang.used="Utilisé" lang.unlimited="Illimité" lang.pastetitle="Utiliser les touches CTRL + V pour coller le contenu dans la boîte ci-dessous : " lang.pastetexttitle="Utiliser les touches CTRL + V pour coller le texte dans la case ci-dessous : " lang.pastewordtitle="Utiliser les touches CTRL + V pour coller le contenu Word dans la boîte ci-dessous : " lang.closeafterpaste="Fermer automatiquement la boîte de dialogue après avoir appuyé sur Ctrl + V" lang.keeplinebreaks="Conserver les sauts de ligne" lang.findwhat="Trouver : " lang.replacewith="Remplacer par : " lang.replaceall="Tout remplacer" lang.matchcase="Respecter la casse" lang.matchword="Chercher tous les mots" lang.youtubeurl="URL Youtube:" lang.usehttps="Utiliser https" lang.autoplay="Démarrage automatique" lang.autoloop="Boucle automatique" lang.allowmenu="Activer le menu" lang.showcontrols="Afficher les contrôles" lang.loop="Boucle" lang.hideinfo="Masquer le titre" lang.hidetoolbar="Masquer la barre d\x27outils" lang.lighttheme="Thème de la lumière" lang.youtubepreview="Une image YouTube sera affichée ici lorsque vous aurez indiqué une URL Youtube." lang.allowfullscreen="Autoriser l\x27affichage plein écran" lang.transparency="Transparence" lang.search="Rechercher" lang.tooltip_createfolder="Créer un dossier" lang.tooltip_moveitems="Déplacer les fichiers ou dossiers" lang.tooltip_copyitems="Copier des fichiers ou des dossiers" lang.tooltip_deleteitems="Supprimer les fichiers ou dossiers" lang.menu_sortbynameasc="Trier par nom asc" lang.menu_sortbynamedesc="Trier par nom desc" lang.menu_sortbysizeasc="Trier par taille asc" lang.menu_sortbysizedesc="Trier par taille desc" lang.menu_sortbydateasc="Trier par date asc" lang.menu_sortbydatedesc="Trier par date desc" lang.msg_itembemoved="éléments ont été déplacés avec succès." lang.msg_itembecopied="articles ont été copiés avec succès." lang.msg_failedrename="Impossible de renommer cet article." lang.msg_deny_copyfile="Action interdite. Vous n\x27avez pas la permission d\x27accéder à cette fonction: copier des fichiers" lang.msg_deny_copyfolder="Action interdite. Vous n\x27avez pas la permission d\x27accéder à cette fonction: copier des dossiers" lang.msg_deny_movefile="Action interdite. Vous n\x27avez pas la permission d\x27accéder à cette fonction: déplacer les fichiers" lang.msg_deny_movefolder="Action interdite. Vous n\x27avez pas la permission d\x27accéder à cette fonction: déplacer des dossiers" lang.msg_deny_deletefile="Action interdite. Vous n\x27avez pas la permission d\x27accéder à cette fonction: supprimer des fichiers" lang.msg_deny_deletefolder="Action interdite. Vous n\x27avez pas la permission d\x27accéder à cette fonction: supprimer des dossiers" lang.msg_deny_renamefile="Action interdite. Vous n\x27avez pas la permission d\x27accéder à cette fonction: renommer des fichiers" lang.msg_deny_renamefolder="Action interdite. 
Vous n\x27avez pas la permission d\x27accéder à cette fonction: renommer des dossiers" lang.msg_foldercreated="Dossier \x22{0} \x27a été créé avec succès." lang.msg_itemexiststitle="Confirmer que le fichier existe"
lang.msg_overwrite="Écraser" lang.msg_rename="Renommer" lang.msg_skip="Sauter" lang.msg_requireselecteditems="S\x27il vous plaît sélectionner des dossiers ou des fichiers puis essayez à nouveau." lang.msg_namebeused="Il existe déjà un fichier ou un dossier portant le même nom ({0}) à cet endroit." lang.msg_sameforitems="Appliquer cette action pour les prochaines {0} articles." lang.msg_displayuploaded="Le tableau affiche les fichiers les plus récemment mis en ligne." lang.msg_showall="Afficher tous les fichiers" lang.prompt_newname="S\x27il vous plaît, entrer le nouveau nom:" lang.prompt_newfoldername="S\x27il vous plaît, entrer le nouveau nom de dossier:" lang.confirm_deleteitem="Etes-vous sûr de vouloir le supprimer?" lang.confirm_deleteitems="Etes-vous sûr de vouloir supprimer les éléments sélectionnés ({0})?" lang.label_selectfilestoupload="S\x27il vous plaît, sélectionner les fichiers à télécharger" lang.label_maxfilesize="Taille du fichier Max:" lang.label_maxfoldersize="Taille du dossier Max:" lang.label_maxdimensions="Dimensions maxi :" lang.label_allowextensions="Extensions autorisées :" lang.msg_nofindmatch="L\x27élément de recherche n\x27a pas été trouvé"; lang.msg_replaceall="La recherche du document a été complétée et {0} remplacements ont été effectués." lang.msg_finishreplace="Éditeur a terminé la recherche du document." lang.automatic="Automatique" lang.morecolors="Plus les couleurs ..." lang.webpalette="Palette Web" lang.namedcolors="Couleurs nommées" lang.customcolor="Couleur personnalisée" lang.basic="De base" lang.additional="Supplémentaire" lang.usecolornames="Utiliser des noms de couleurs" lang.codelanguage="Langage" lang.wordnotindictionary="Mot absent du dictionnaire" lang.cellspacing="Espacement entre les cellules" lang.cellpadding="Marge intérieure des cellules" lang.rules="Règles" lang.bordercollapse="Réduire les bordures du tableau" lang.attributes="Attributs" lang.backgroundcolor="Couleur de fond" lang.summary="Résumé" lang.caption="Légende" lang.align="Alignement" lang.inherit="Hérite" lang.block="Blocs" lang.clean_matchitems="Supprimer" lang.clean_unmatchitems="Analyse impossible" lang.msg_cleancode_nomatches="Aucun code HTML erroné. Votre code html est propre." 
lang.clean_removeemptymargin="Retirer les marges vides" lang.clean_encodespecialchars="Convertir tous les caractères applicables aux entités HTML" lang.clean_fixaccessbility="Attributs de remplissage automatique accessbilité, par exemple <img alt=\x27\x27>" lang.clean_removeemptytags="Enlever les étiquettes de conteneurs vides <span></span> <strong/> ...</strong>" lang.clean_removecomments="Nettoyer tous les codes commentaire <!--...-->" lang.clean_removespannoattr="Enlever les étiquettes portées sans aucun attribut" lang.clean_removefonts="Nettoyer les balises<font>...</font>" lang.clean_mergestyle="Fusionner les balises adjacentes" lang.clean_wordfilter="Balisage spécifiqueMicrosoft Word"; lang.tagstoremove="Balises spécifiques" lang.border_notset="Non défini" lang.border_solid="solide" lang.border_dashed="en pointillés" lang.border_dotted="pointillé" lang.border_inset="encart" lang.border_outset="début" lang.attr_alt="Un texte alternatif" lang.longdesc="Longue description" lang.border="Bordures" lang.margin="Marges extérieures" lang.padding="Marges intérieures" lang.alignment="Alignement" lang.url="URL" lang.namedanchor="Ancre nommée" lang.anchorname="Nom de l\x27ancre" lang.empty="Vide" lang.codesnippet="Balises ADN" lang.action="Action" lang.codesnippet="Balises ADN" lang.method="Méthode" lang.encodingtype="Type d\x27encodage" lang.disabled="Désactivé" lang.readonly="Lecture seule" lang
lang.msg_itemexists="Il existe déjà un fichier portant le même nom ({0}) à cet endroit. \x5C N \x5C nVoulez-vous remplacer le fichier existant?"
random_line_split
grafana.py
grafana administrator password.") parser.add_option("-e", "--editor", dest="editor", help="Create a user with editing authority. You need to enter username, password and email address \ (this mailbox will be used to receive alarm messages) in order and separated them by comma. \ i.e. \"esgyn,password,[email protected]\"") parser.add_option("-s", "--smtp", dest="smtp",
return options def format_output(text): num = len(text) + 4 print ('*' * num) print (' ' + text) print ('*' * num) def log_output(msg): logger.info("****%s****" % msg) format_output(msg) def skip(msg): print('\33[32m***[SKIP]: %s \33[0m' % msg) def info(msg): print('\33[33m***[INFO]: %s \33[0m' % msg) def error(msg, logout=True): print('\n\33[35m***[ERROR]: %s \33[0m' % msg) if logout: logger.error(msg) sys.exit(1) def load_user(): if os.path.exists(TMP_USERINFO): with open(TMP_USERINFO, "r") as user_info: userinfo = json.load(user_info) else: userinfo = {"user": "admin", "psword": "admin"} return userinfo["user"], userinfo["psword"] class Grafana(object): def __init__(self, admin_user, admin_psword): self.admin_user = admin_user self.admin_psword = admin_psword self.ip = socket.gethostbyname(socket.gethostname()) self.url = 'http://%s:%s@%s' % (self.admin_user, self.admin_psword, self.ip) self.headers = {"Content-Type": 'application/json', "Accept": 'application/json'} def switch_request(self, mode, api, data=""): url = self.url + api data = json.dumps(data) switcher = { "get": requests.get(url, headers=self.headers), "put": requests.put(url, data=data, headers=self.headers), "post": requests.post(url, data=data, headers=self.headers), "patch": requests.patch(url, data=data, headers=self.headers) } return switcher.get(mode, "Nothing") def set_admin_psw(self, new_admin_psword): log_output("Set Admin Password") psw_api = ':3000/api/user/password' data = {"oldPassword": self.admin_psword, "newPassword": new_admin_psword, "confirNew": new_admin_psword} put_psw = self.switch_request("put", psw_api, data) if put_psw.status_code == 200: logger.info("Admin password Updated! %s %s" % (put_psw, put_psw.text)) info("Admin password Updated!") self.admin_psword = new_admin_psword self.url = 'http://%s:%s@%s' % (self.admin_user, self.admin_psword, self.ip) else: error("Password Update Error %s %s" % (put_psw, put_psw.text)) def set_editor(self, editor, editor_psword, email): log_output("Create Editor") editor_api = ':3000/api/admin/users' data = {"name": editor, "email": email, "login": editor, "password": editor_psword} editor = self.switch_request("post", editor_api, data) if editor.status_code == 200: editor_id = json.loads(editor.text)["id"] org_api = ':3000/api/org/users/' + str(editor_id) data = {"role": "Editor"} self.switch_request("patch", org_api, data) logger.info("Editor created successfully! 
%s %s" % (editor, editor.text)) info("Editor created successfully!") elif editor.status_code == 500: logger.info("This editor has been created.\nSkip create this editor.") skip("This editor has been created.\nSkip create this editor.") else: error("Editor created Error %s %s" % (editor, editor.text)) def notification_import(self, receive_addr): log_output("Start importing alert notification...") noti_api = ':3000/api/alert-notifications' data = {"sendReminder": False, "type": "email", "name": "Esgyndb Notification", "isDefault": False, "settings": {"addresses": ""}} data["settings"]["addresses"] = receive_addr response = self.switch_request("post", noti_api, data) if response.status_code == 200: logger.info("Alert notification import successfully!") info("Alert notification import successfully!") elif response.status_code == 500: logger.info("This notifiction has been existed.\nSkip import this notifiction.") skip("This notification has been existed.\nSkip import this notifiction.") else: error("Alert Notification Import Error %s %s" % (response, response.text)) def templet_import(self, mode, ds_name): # import dashbord or datasource title = ds_name + ' ' + mode if mode == 'dashboard': get_api = ':3000/api/dashboards/uid/esgyndb' imp_url = self.url + ':3000/api/dashboards/db' elif mode == 'datasource': get_api = ':3000/api/datasources/name/%s' % ds_name.lower() imp_url = self.url + ':3000/api/datasources' log_output("Check %s" % title) check = self.switch_request("get", get_api) logger.info("%s %s" % (check, check.text)) if check.status_code == 200: logger.info("This %s has been existed.\nSkip import this %s." % (title, title)) skip("This %s has been existed.\nSkip import this %s." % (title, title)) elif check.status_code == 404: info("%s dosen't exist." % title) log_output("Start importing %s..." % title) data = open('%s_%s.json' % (ds_name.lower(), mode.lower()), 'rb') response = requests.post(imp_url, data=data, headers=self.headers) logger.info("%s %s" % (response, response.text)) if response.status_code == 200: logger.info("%s import successfully!" % title) info("%s import successfully!" % title) else: error("%s Import Error %s %s" % (mode, response, response.text)) elif check.status_code != 200 and check.status_code != 404: error("Check error %s %s" % (check, check.text)) def start_db(self): log_output("Start Dashboard") search_api = ':3000/api/search' search = self.switch_request("get", search_api) data = search.text.encode() data = json.loads(data) for d in data: if d["uid"] == "esgyndb": db_id = d["id"] break start_api = ':3000/api/user/stars/dashboard/' + str(db_id) start = self.switch_request("post", start_api) if start.status_code == 200: logger.info("%s %s" % (start, start.text)) info("Dashboard started") elif start.status_code == 500: logger.info("%s %s" % (start, start.text)) skip("This dashboard has been started.\nSkip this process.") else: error("Dashboard Start Error %s %s" % (start, start.text)) def set_smtp(self, sendmail, smtp_host, smtp_psword): data = """enabled=true host=%s user=%s password=%s from_address=%s from_name = Grafana """ % (smtp_host, sendmail, smtp_psword, sendmail) log_output("Set grafana.ini") if os.path.exists(GRA_CONFILE): confile = open(GRA_CONFILE).readlines() lines = [] config = ConfigParser.ConfigParser() config.read(GRA_CONFILE) item = config.items('smtp') for i in range(len(confile)): lines.append(confile[i]) if "[smtp]" in confile[i]: linenum = i if item: logger.info("Skip set grafana.ini. It has been changed.") skip("Skip set grafana.ini. 
It has been changed.") elif not item: lines.insert(linenum+1, data) s = ''.join(lines) with open(GRA_CONFILE, 'w') as confile: confile.write(s) p = subprocess.Popen("
help="Set the mailbox to send the alarm message. You need to enter email address, smtp server \ and password in order and separated them by comma. i.e. \"[email protected],smtp.qq.com,password\"") options, args = parser.parse_args()
random_line_split
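Grafana.switch_request in this grafana.py record builds its switcher dict by calling requests.get/put/post/patch directly, so all four HTTP requests are issued while the dict is constructed, whatever mode was asked for. A hedged sketch of a lazy variant that defers each call behind a lambda; the standalone function signature is illustrative, while the requests calls mirror the source.

# Illustrative sketch: lazy dispatch for switch_request(), so only the selected
# HTTP verb actually fires.
import json
import requests

def switch_request(base_url, headers, mode, api, data=""):
    url = base_url + api
    body = json.dumps(data)
    switcher = {
        "get":   lambda: requests.get(url, headers=headers),
        "put":   lambda: requests.put(url, data=body, headers=headers),
        "post":  lambda: requests.post(url, data=body, headers=headers),
        "patch": lambda: requests.patch(url, data=body, headers=headers),
    }
    action = switcher.get(mode)
    return action() if action else "Nothing"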
grafana.py
]: %s \33[0m' % msg) def error(msg, logout=True): print('\n\33[35m***[ERROR]: %s \33[0m' % msg) if logout: logger.error(msg) sys.exit(1) def load_user(): if os.path.exists(TMP_USERINFO): with open(TMP_USERINFO, "r") as user_info: userinfo = json.load(user_info) else: userinfo = {"user": "admin", "psword": "admin"} return userinfo["user"], userinfo["psword"] class Grafana(object): def __init__(self, admin_user, admin_psword): self.admin_user = admin_user self.admin_psword = admin_psword self.ip = socket.gethostbyname(socket.gethostname()) self.url = 'http://%s:%s@%s' % (self.admin_user, self.admin_psword, self.ip) self.headers = {"Content-Type": 'application/json', "Accept": 'application/json'} def switch_request(self, mode, api, data=""): url = self.url + api data = json.dumps(data) switcher = { "get": requests.get(url, headers=self.headers), "put": requests.put(url, data=data, headers=self.headers), "post": requests.post(url, data=data, headers=self.headers), "patch": requests.patch(url, data=data, headers=self.headers) } return switcher.get(mode, "Nothing") def set_admin_psw(self, new_admin_psword): log_output("Set Admin Password") psw_api = ':3000/api/user/password' data = {"oldPassword": self.admin_psword, "newPassword": new_admin_psword, "confirNew": new_admin_psword} put_psw = self.switch_request("put", psw_api, data) if put_psw.status_code == 200: logger.info("Admin password Updated! %s %s" % (put_psw, put_psw.text)) info("Admin password Updated!") self.admin_psword = new_admin_psword self.url = 'http://%s:%s@%s' % (self.admin_user, self.admin_psword, self.ip) else: error("Password Update Error %s %s" % (put_psw, put_psw.text)) def set_editor(self, editor, editor_psword, email): log_output("Create Editor") editor_api = ':3000/api/admin/users' data = {"name": editor, "email": email, "login": editor, "password": editor_psword} editor = self.switch_request("post", editor_api, data) if editor.status_code == 200: editor_id = json.loads(editor.text)["id"] org_api = ':3000/api/org/users/' + str(editor_id) data = {"role": "Editor"} self.switch_request("patch", org_api, data) logger.info("Editor created successfully! 
%s %s" % (editor, editor.text)) info("Editor created successfully!") elif editor.status_code == 500: logger.info("This editor has been created.\nSkip create this editor.") skip("This editor has been created.\nSkip create this editor.") else: error("Editor created Error %s %s" % (editor, editor.text)) def notification_import(self, receive_addr): log_output("Start importing alert notification...") noti_api = ':3000/api/alert-notifications' data = {"sendReminder": False, "type": "email", "name": "Esgyndb Notification", "isDefault": False, "settings": {"addresses": ""}} data["settings"]["addresses"] = receive_addr response = self.switch_request("post", noti_api, data) if response.status_code == 200: logger.info("Alert notification import successfully!") info("Alert notification import successfully!") elif response.status_code == 500: logger.info("This notifiction has been existed.\nSkip import this notifiction.") skip("This notification has been existed.\nSkip import this notifiction.") else: error("Alert Notification Import Error %s %s" % (response, response.text)) def templet_import(self, mode, ds_name): # import dashbord or datasource title = ds_name + ' ' + mode if mode == 'dashboard': get_api = ':3000/api/dashboards/uid/esgyndb' imp_url = self.url + ':3000/api/dashboards/db' elif mode == 'datasource': get_api = ':3000/api/datasources/name/%s' % ds_name.lower() imp_url = self.url + ':3000/api/datasources' log_output("Check %s" % title) check = self.switch_request("get", get_api) logger.info("%s %s" % (check, check.text)) if check.status_code == 200: logger.info("This %s has been existed.\nSkip import this %s." % (title, title)) skip("This %s has been existed.\nSkip import this %s." % (title, title)) elif check.status_code == 404: info("%s dosen't exist." % title) log_output("Start importing %s..." % title) data = open('%s_%s.json' % (ds_name.lower(), mode.lower()), 'rb') response = requests.post(imp_url, data=data, headers=self.headers) logger.info("%s %s" % (response, response.text)) if response.status_code == 200: logger.info("%s import successfully!" % title) info("%s import successfully!" % title) else: error("%s Import Error %s %s" % (mode, response, response.text)) elif check.status_code != 200 and check.status_code != 404: error("Check error %s %s" % (check, check.text)) def start_db(self): log_output("Start Dashboard") search_api = ':3000/api/search' search = self.switch_request("get", search_api) data = search.text.encode() data = json.loads(data) for d in data: if d["uid"] == "esgyndb": db_id = d["id"] break start_api = ':3000/api/user/stars/dashboard/' + str(db_id) start = self.switch_request("post", start_api) if start.status_code == 200: logger.info("%s %s" % (start, start.text)) info("Dashboard started") elif start.status_code == 500: logger.info("%s %s" % (start, start.text)) skip("This dashboard has been started.\nSkip this process.") else: error("Dashboard Start Error %s %s" % (start, start.text)) def set_smtp(self, sendmail, smtp_host, smtp_psword): data = """enabled=true host=%s user=%s password=%s from_address=%s from_name = Grafana """ % (smtp_host, sendmail, smtp_psword, sendmail) log_output("Set grafana.ini") if os.path.exists(GRA_CONFILE): confile = open(GRA_CONFILE).readlines() lines = [] config = ConfigParser.ConfigParser() config.read(GRA_CONFILE) item = config.items('smtp') for i in range(len(confile)): lines.append(confile[i]) if "[smtp]" in confile[i]: linenum = i if item: logger.info("Skip set grafana.ini. It has been changed.") skip("Skip set grafana.ini. 
It has been changed.") elif not item: lines.insert(linenum+1, data) s = ''.join(lines) with open(GRA_CONFILE, 'w') as confile: confile.write(s) p = subprocess.Popen("sudo service grafana-server restart", shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) info("Set Grafana.ini Complete") else: error("%s doesn't exist" % GRA_CONFILE) def run():
try: set_logger(logger) option = get_options() print("\33[32m[Log file location]: %s \33[0m" % log_file) admin_user, admin_psword = load_user() grafana = Grafana(admin_user, admin_psword) if option.admin_psword: grafana.set_admin_psw(option.admin_psword) admin_psword = option.admin_psword if option.editor: option.editor = option.editor.split(',') if len(option.editor) == 3: editor, editor_psword, editor_email = option.editor grafana.set_editor(editor.strip(), editor_psword.strip(), editor_email.strip()) else: error("You need input email;host;psword(eg)") grafana.templet_import('datasource', 'Esgyn') grafana.templet_import('datasource', 'Loki') grafana.templet_import('dashboard', 'Esgyn') if option.smtp:
identifier_body
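set_editor, visible in this record's context, creates an editor in two steps: POST the account to /api/admin/users, then PATCH the new user's organisation role to "Editor". A hedged standalone sketch of that flow; the base URL, credentials and user details are placeholders, and only the endpoints and payload shape come from the source.

# Illustrative sketch of the two-step editor setup used by set_editor().
import json
import requests

base = "http://admin:admin@localhost"        # placeholder Grafana address
headers = {"Content-Type": "application/json", "Accept": "application/json"}
user = {"name": "editor", "email": "editor@example.com",
        "login": "editor", "password": "change-me"}   # placeholder account details

created = requests.post(base + ":3000/api/admin/users",
                        data=json.dumps(user), headers=headers)
if created.status_code == 200:
    uid = created.json()["id"]
    requests.patch(base + ":3000/api/org/users/%d" % uid,
                   data=json.dumps({"role": "Editor"}), headers=headers)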
grafana.py
grafana administrator password.") parser.add_option("-e", "--editor", dest="editor", help="Create a user with editing authority. You need to enter username, password and email address \ (this mailbox will be used to receive alarm messages) in order and separated them by comma. \ i.e. \"esgyn,password,[email protected]\"") parser.add_option("-s", "--smtp", dest="smtp", help="Set the mailbox to send the alarm message. You need to enter email address, smtp server \ and password in order and separated them by comma. i.e. \"[email protected],smtp.qq.com,password\"") options, args = parser.parse_args() return options def format_output(text): num = len(text) + 4 print ('*' * num) print (' ' + text) print ('*' * num) def log_output(msg): logger.info("****%s****" % msg) format_output(msg) def skip(msg): print('\33[32m***[SKIP]: %s \33[0m' % msg) def info(msg): print('\33[33m***[INFO]: %s \33[0m' % msg) def error(msg, logout=True): print('\n\33[35m***[ERROR]: %s \33[0m' % msg) if logout: logger.error(msg) sys.exit(1) def load_user(): if os.path.exists(TMP_USERINFO): with open(TMP_USERINFO, "r") as user_info: userinfo = json.load(user_info) else: userinfo = {"user": "admin", "psword": "admin"} return userinfo["user"], userinfo["psword"] class Grafana(object): def __init__(self, admin_user, admin_psword): self.admin_user = admin_user self.admin_psword = admin_psword self.ip = socket.gethostbyname(socket.gethostname()) self.url = 'http://%s:%s@%s' % (self.admin_user, self.admin_psword, self.ip) self.headers = {"Content-Type": 'application/json', "Accept": 'application/json'} def switch_request(self, mode, api, data=""): url = self.url + api data = json.dumps(data) switcher = { "get": requests.get(url, headers=self.headers), "put": requests.put(url, data=data, headers=self.headers), "post": requests.post(url, data=data, headers=self.headers), "patch": requests.patch(url, data=data, headers=self.headers) } return switcher.get(mode, "Nothing") def set_admin_psw(self, new_admin_psword): log_output("Set Admin Password") psw_api = ':3000/api/user/password' data = {"oldPassword": self.admin_psword, "newPassword": new_admin_psword, "confirNew": new_admin_psword} put_psw = self.switch_request("put", psw_api, data) if put_psw.status_code == 200: logger.info("Admin password Updated! %s %s" % (put_psw, put_psw.text)) info("Admin password Updated!") self.admin_psword = new_admin_psword self.url = 'http://%s:%s@%s' % (self.admin_user, self.admin_psword, self.ip) else:
def set_editor(self, editor, editor_psword, email): log_output("Create Editor") editor_api = ':3000/api/admin/users' data = {"name": editor, "email": email, "login": editor, "password": editor_psword} editor = self.switch_request("post", editor_api, data) if editor.status_code == 200: editor_id = json.loads(editor.text)["id"] org_api = ':3000/api/org/users/' + str(editor_id) data = {"role": "Editor"} self.switch_request("patch", org_api, data) logger.info("Editor created successfully! %s %s" % (editor, editor.text)) info("Editor created successfully!") elif editor.status_code == 500: logger.info("This editor has been created.\nSkip create this editor.") skip("This editor has been created.\nSkip create this editor.") else: error("Editor created Error %s %s" % (editor, editor.text)) def notification_import(self, receive_addr): log_output("Start importing alert notification...") noti_api = ':3000/api/alert-notifications' data = {"sendReminder": False, "type": "email", "name": "Esgyndb Notification", "isDefault": False, "settings": {"addresses": ""}} data["settings"]["addresses"] = receive_addr response = self.switch_request("post", noti_api, data) if response.status_code == 200: logger.info("Alert notification import successfully!") info("Alert notification import successfully!") elif response.status_code == 500: logger.info("This notifiction has been existed.\nSkip import this notifiction.") skip("This notification has been existed.\nSkip import this notifiction.") else: error("Alert Notification Import Error %s %s" % (response, response.text)) def templet_import(self, mode, ds_name): # import dashbord or datasource title = ds_name + ' ' + mode if mode == 'dashboard': get_api = ':3000/api/dashboards/uid/esgyndb' imp_url = self.url + ':3000/api/dashboards/db' elif mode == 'datasource': get_api = ':3000/api/datasources/name/%s' % ds_name.lower() imp_url = self.url + ':3000/api/datasources' log_output("Check %s" % title) check = self.switch_request("get", get_api) logger.info("%s %s" % (check, check.text)) if check.status_code == 200: logger.info("This %s has been existed.\nSkip import this %s." % (title, title)) skip("This %s has been existed.\nSkip import this %s." % (title, title)) elif check.status_code == 404: info("%s dosen't exist." % title) log_output("Start importing %s..." % title) data = open('%s_%s.json' % (ds_name.lower(), mode.lower()), 'rb') response = requests.post(imp_url, data=data, headers=self.headers) logger.info("%s %s" % (response, response.text)) if response.status_code == 200: logger.info("%s import successfully!" % title) info("%s import successfully!" 
% title) else: error("%s Import Error %s %s" % (mode, response, response.text)) elif check.status_code != 200 and check.status_code != 404: error("Check error %s %s" % (check, check.text)) def start_db(self): log_output("Start Dashboard") search_api = ':3000/api/search' search = self.switch_request("get", search_api) data = search.text.encode() data = json.loads(data) for d in data: if d["uid"] == "esgyndb": db_id = d["id"] break start_api = ':3000/api/user/stars/dashboard/' + str(db_id) start = self.switch_request("post", start_api) if start.status_code == 200: logger.info("%s %s" % (start, start.text)) info("Dashboard started") elif start.status_code == 500: logger.info("%s %s" % (start, start.text)) skip("This dashboard has been started.\nSkip this process.") else: error("Dashboard Start Error %s %s" % (start, start.text)) def set_smtp(self, sendmail, smtp_host, smtp_psword): data = """enabled=true host=%s user=%s password=%s from_address=%s from_name = Grafana """ % (smtp_host, sendmail, smtp_psword, sendmail) log_output("Set grafana.ini") if os.path.exists(GRA_CONFILE): confile = open(GRA_CONFILE).readlines() lines = [] config = ConfigParser.ConfigParser() config.read(GRA_CONFILE) item = config.items('smtp') for i in range(len(confile)): lines.append(confile[i]) if "[smtp]" in confile[i]: linenum = i if item: logger.info("Skip set grafana.ini. It has been changed.") skip("Skip set grafana.ini. It has been changed.") elif not item: lines.insert(linenum+1, data) s = ''.join(lines) with open(GRA_CONFILE, 'w') as confile: confile.write(s) p = subprocess.Popen
error("Password Update Error %s %s" % (put_psw, put_psw.text))
conditional_block
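set_smtp, whose body appears in this record's suffix, only touches grafana.ini when the [smtp] section has no keys yet, and then splices its SMTP block in right after the section header before restarting the service. A hedged sketch of that splice on its own; the path handling and return value are illustrative, and the block contents would come from the caller.

# Illustrative sketch of the grafana.ini splice performed by set_smtp().
import configparser

def add_smtp_block(conf_path, block):
    config = configparser.ConfigParser()
    config.read(conf_path)
    if config.has_section("smtp") and config.items("smtp"):
        return False                      # already configured -> skip, as the source does
    with open(conf_path) as fh:
        lines = fh.readlines()
    for i, line in enumerate(lines):
        if line.strip() == "[smtp]":
            lines.insert(i + 1, block)    # insert right under the [smtp] header
            break
    with open(conf_path, "w") as fh:
        fh.writelines(lines)
    return True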
grafana.py
grafana administrator password.") parser.add_option("-e", "--editor", dest="editor", help="Create a user with editing authority. You need to enter username, password and email address \ (this mailbox will be used to receive alarm messages) in order and separated them by comma. \ i.e. \"esgyn,password,[email protected]\"") parser.add_option("-s", "--smtp", dest="smtp", help="Set the mailbox to send the alarm message. You need to enter email address, smtp server \ and password in order and separated them by comma. i.e. \"[email protected],smtp.qq.com,password\"") options, args = parser.parse_args() return options def format_output(text): num = len(text) + 4 print ('*' * num) print (' ' + text) print ('*' * num) def log_output(msg): logger.info("****%s****" % msg) format_output(msg) def skip(msg): print('\33[32m***[SKIP]: %s \33[0m' % msg) def info(msg): print('\33[33m***[INFO]: %s \33[0m' % msg) def error(msg, logout=True): print('\n\33[35m***[ERROR]: %s \33[0m' % msg) if logout: logger.error(msg) sys.exit(1) def load_user(): if os.path.exists(TMP_USERINFO): with open(TMP_USERINFO, "r") as user_info: userinfo = json.load(user_info) else: userinfo = {"user": "admin", "psword": "admin"} return userinfo["user"], userinfo["psword"] class Grafana(object): def __init__(self, admin_user, admin_psword): self.admin_user = admin_user self.admin_psword = admin_psword self.ip = socket.gethostbyname(socket.gethostname()) self.url = 'http://%s:%s@%s' % (self.admin_user, self.admin_psword, self.ip) self.headers = {"Content-Type": 'application/json', "Accept": 'application/json'} def switch_request(self, mode, api, data=""): url = self.url + api data = json.dumps(data) switcher = { "get": requests.get(url, headers=self.headers), "put": requests.put(url, data=data, headers=self.headers), "post": requests.post(url, data=data, headers=self.headers), "patch": requests.patch(url, data=data, headers=self.headers) } return switcher.get(mode, "Nothing") def set_admin_psw(self, new_admin_psword): log_output("Set Admin Password") psw_api = ':3000/api/user/password' data = {"oldPassword": self.admin_psword, "newPassword": new_admin_psword, "confirNew": new_admin_psword} put_psw = self.switch_request("put", psw_api, data) if put_psw.status_code == 200: logger.info("Admin password Updated! %s %s" % (put_psw, put_psw.text)) info("Admin password Updated!") self.admin_psword = new_admin_psword self.url = 'http://%s:%s@%s' % (self.admin_user, self.admin_psword, self.ip) else: error("Password Update Error %s %s" % (put_psw, put_psw.text)) def set_editor(self, editor, editor_psword, email): log_output("Create Editor") editor_api = ':3000/api/admin/users' data = {"name": editor, "email": email, "login": editor, "password": editor_psword} editor = self.switch_request("post", editor_api, data) if editor.status_code == 200: editor_id = json.loads(editor.text)["id"] org_api = ':3000/api/org/users/' + str(editor_id) data = {"role": "Editor"} self.switch_request("patch", org_api, data) logger.info("Editor created successfully! 
%s %s" % (editor, editor.text)) info("Editor created successfully!") elif editor.status_code == 500: logger.info("This editor has been created.\nSkip create this editor.") skip("This editor has been created.\nSkip create this editor.") else: error("Editor created Error %s %s" % (editor, editor.text)) def notification_import(self, receive_addr): log_output("Start importing alert notification...") noti_api = ':3000/api/alert-notifications' data = {"sendReminder": False, "type": "email", "name": "Esgyndb Notification", "isDefault": False, "settings": {"addresses": ""}} data["settings"]["addresses"] = receive_addr response = self.switch_request("post", noti_api, data) if response.status_code == 200: logger.info("Alert notification import successfully!") info("Alert notification import successfully!") elif response.status_code == 500: logger.info("This notifiction has been existed.\nSkip import this notifiction.") skip("This notification has been existed.\nSkip import this notifiction.") else: error("Alert Notification Import Error %s %s" % (response, response.text)) def templet_import(self, mode, ds_name): # import dashbord or datasource title = ds_name + ' ' + mode if mode == 'dashboard': get_api = ':3000/api/dashboards/uid/esgyndb' imp_url = self.url + ':3000/api/dashboards/db' elif mode == 'datasource': get_api = ':3000/api/datasources/name/%s' % ds_name.lower() imp_url = self.url + ':3000/api/datasources' log_output("Check %s" % title) check = self.switch_request("get", get_api) logger.info("%s %s" % (check, check.text)) if check.status_code == 200: logger.info("This %s has been existed.\nSkip import this %s." % (title, title)) skip("This %s has been existed.\nSkip import this %s." % (title, title)) elif check.status_code == 404: info("%s dosen't exist." % title) log_output("Start importing %s..." % title) data = open('%s_%s.json' % (ds_name.lower(), mode.lower()), 'rb') response = requests.post(imp_url, data=data, headers=self.headers) logger.info("%s %s" % (response, response.text)) if response.status_code == 200: logger.info("%s import successfully!" % title) info("%s import successfully!" % title) else: error("%s Import Error %s %s" % (mode, response, response.text)) elif check.status_code != 200 and check.status_code != 404: error("Check error %s %s" % (check, check.text)) def
(self): log_output("Start Dashboard") search_api = ':3000/api/search' search = self.switch_request("get", search_api) data = search.text.encode() data = json.loads(data) for d in data: if d["uid"] == "esgyndb": db_id = d["id"] break start_api = ':3000/api/user/stars/dashboard/' + str(db_id) start = self.switch_request("post", start_api) if start.status_code == 200: logger.info("%s %s" % (start, start.text)) info("Dashboard started") elif start.status_code == 500: logger.info("%s %s" % (start, start.text)) skip("This dashboard has been started.\nSkip this process.") else: error("Dashboard Start Error %s %s" % (start, start.text)) def set_smtp(self, sendmail, smtp_host, smtp_psword): data = """enabled=true host=%s user=%s password=%s from_address=%s from_name = Grafana """ % (smtp_host, sendmail, smtp_psword, sendmail) log_output("Set grafana.ini") if os.path.exists(GRA_CONFILE): confile = open(GRA_CONFILE).readlines() lines = [] config = ConfigParser.ConfigParser() config.read(GRA_CONFILE) item = config.items('smtp') for i in range(len(confile)): lines.append(confile[i]) if "[smtp]" in confile[i]: linenum = i if item: logger.info("Skip set grafana.ini. It has been changed.") skip("Skip set grafana.ini. It has been changed.") elif not item: lines.insert(linenum+1, data) s = ''.join(lines) with open(GRA_CONFILE, 'w') as confile: confile.write(s) p = subprocess.Popen
start_db
identifier_name
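Editor's note on the Grafana helper in the row above: switch_request builds its switcher dict from already-executed requests calls, so every dispatch fires a GET, PUT, POST and PATCH against the server before one result is picked. Below is a minimal sketch of a lazy dispatcher, assuming the same self.url and self.headers attributes as the class; note also that the confirNew key in set_admin_psw looks like a typo for the confirmNew field Grafana's password API documents, which is worth double-checking.

import json
import requests

def switch_request(self, mode, api, data=""):
    """Send only the request selected by `mode` (get/put/post/patch)."""
    url = self.url + api
    payload = json.dumps(data)
    # map the mode to a callable instead of an already-executed response
    switcher = {
        "get": lambda: requests.get(url, headers=self.headers),
        "put": lambda: requests.put(url, data=payload, headers=self.headers),
        "post": lambda: requests.post(url, data=payload, headers=self.headers),
        "patch": lambda: requests.patch(url, data=payload, headers=self.headers),
    }
    handler = switcher.get(mode)
    return handler() if handler else "Nothing"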
WebRTCDatabase.ts
object, // D extends Diff<Data, Action> = Diff<Data, Action> // >(a: D, b: D) { // if (a.timestamp < b.timestamp) { // return 1; // } else if (a.timestamp > b.timestamp) { // return -1; // } else { // return 0; // } // } interface BaseDBMessage { type: string; } interface SendUpdate<Data extends object, Action extends object> extends BaseDBMessage {
updates: Diff<Data, Action>[]; } interface SendInternals<Data extends object, Action extends object> { type: "sendInternals"; connectionIDs: string[]; diffStack: Diff<Data, Action>[]; } interface RequestInternals { type: "requestInternals"; } interface SendConnections { type: "sendConnections"; connections: string[]; } type Message<Data extends object, Action extends object> = | SendUpdate<Data, Action> | SendInternals<Data, Action> | RequestInternals | SendConnections; type OnConnectCB = (conn: Conn) => void; type OnDisconnectCB = (conn: Conn) => void; type OnChangeCB<Data extends object> = (data: Data) => void; export class WebRTCDatabase<Data extends object, Action extends object> { private peer: PeerJS; private state: Data = {} as Data; private connections: Map<string, Conn> = new Map(); private diffStack: Diff<Data, Action>[] = []; private diffLookup: Set<string> = new Set(); private reducer: (data: Data, action: Action) => Data; private onConnectCallbacks: Set<OnConnectCB> = new Set(); private onDisconnectCallbacks: Set<OnDisconnectCB> = new Set(); private onChangeCallbacks: Set<OnChangeCB<Data>> = new Set(); public get id(): string { return this.peer.id; } constructor( initialState: Data, reducer: (data: Data, action: Action) => Data, peer: PeerJS = new PeerJS() ) { this.peer = peer; this.peer.on("connection", (conn) => { this.setupConnection(conn); }); this.reducer = reducer; this.setState(initialState); } /** * A function for managing state updates, so we can make sure * we're grouping all necessary procedures */ private setState(state: Data) { this.state = state; this.onChangeCallbacks.forEach((cb) => { cb(state); }); } /** * this is a utility function so that we can share logic for * setting up connections */ private async setupConnection(conn: DataConnection): Promise<Conn> { return new Promise<Conn>((resolve, reject) => { // setup listeners conn.on("data", async (data) => { await this.setupMessageListener( conn.peer, data as Message<Data, Action> ); }); conn.on("error", (err) => { reject(err); }); conn.on("close", () => { console.log(`See ya ${conn.peer}`); const fullConn = this.connections.get(conn.peer) as Conn; this.onDisconnectCallbacks.forEach((cb) => { cb(fullConn); }); this.connections.delete(conn.peer); }); // add this to our list of connections const fullConn = { lastUpdated: null, connection: conn, }; this.connections.set(conn.peer, fullConn); this.onConnectCallbacks.forEach((cb) => { cb(fullConn); }); resolve(fullConn); }); } private async setupMessageListener( id: string, message: Message<Data, Action> ) { switch (message.type) { case "sendUpdate": { // this should be an array of diffs in chronological order const diffs = message.updates; // filter out diffs that already exist const filtered = diffs.filter((diff) => !this.diffLookup.has(diff.id)); if (filtered.length > 0) { // add these new diffs to the lookup filtered.forEach((diff) => { this.diffLookup.add(diff.id); }); // apply only the "new" diffs this.applyDiffs(filtered); } break; } case "requestInternals": { if (this.connections.has(id)) { const conn = this.connections.get(id) as Conn; const connectionIDs = Array.from(this.connections.keys()); const diffStack = this.diffStack; this.message(conn, { type: "sendInternals", connectionIDs, diffStack, }); } break; } case "sendInternals": { // connect to all the ids passed const connectPromises = message.connectionIDs .filter((id) => id !== this.id) .map((id) => this.connect(id)); await Promise.all(connectPromises); // update the state 
this.applyDiffs(message.diffStack); message.diffStack.forEach((diff) => { this.diffLookup.add(diff.id); }); break; } case "sendConnections": { const connPromises = message.connections .filter((id) => !this.connections.has(id)) .map((id) => this.connect(id)); await Promise.all(connPromises); } } } /** * Utility function for getting the diffs to apply if * there are no diffs in the diff stack. Used as part of * `applyDiffs` */ private getDiffsToApplyEmpty( diffs: Diff<Data, Action>[] ): Diff<Data, Action>[] { return diffs.sort(sortDiffLeastToGreatest); } /** * Utility function for getting diffs if there is at least * one diff in the diff stack. Used as part of `applyDiffs` */ private getDiffsToApplyMany( diffs: Diff<Data, Action>[] ): Diff<Data, Action>[] { // first sort the diffs we want to apply in order of // least-recent-to-most-recent. This way, when we start // reconstructing the diffStack, we only have to rewind // the state in one big update method, rather than for // each diff diffs.sort(sortDiffLeastToGreatest); // here we construct a list of diffs to apply. We take the head // of the sorted diffs and pop off diffs in the stack until we // find an entry with a timestamp that's less than the head. // Once we have that base list, we can add the rest of our // timestamps and sort them from least to greatest const [head, ...rest] = diffs; const diffsToApply: Diff<Data, Action>[] = []; while ( this.diffStack.length > 0 && this.diffStack[0].timestamp > head.timestamp ) { const entry = this.diffStack.shift() as Diff<Data, Action>; /** * @TODO - Maybe find a safer way to do this that doesn't involve * mutating state within this function? A better solution may be * to create a "getRewoundState" method that finds the state * before a given timestamp * * ~reccanti 7/5/2021 */ this.state = entry.prevState; diffsToApply.push(entry); } diffsToApply.push(head); const sortedDiffsToApply = diffsToApply .concat(...rest) .sort(sortDiffLeastToGreatest); return sortedDiffsToApply; } /** * In the event that we get a batch of diffs that occurred * out-of-order, this function figures out how to reverse * the state and apply diffs sequentially */ private applyDiffs(diffs: Diff<Data, Action>[]) { // const filtered = diffs.filter((diff) => !this.diffLookup.has(diff.id)); // get the number of diffs we need to apply to the diff stack let diffsToApply: Diff<Data, Action>[] = []; if (this.diffStack.length === 0) { diffsToApply = this.getDiffsToApplyEmpty(diffs); } else { diffsToApply = this.getDiffsToApplyMany(diffs); } // now that we have a list of diffs to apply: // 1. cycle through them and apply the action to the reducer // 2. add the diff back to the diffStack let prevState = this.getState(); diffsToApply.forEach((diff) => { diff.prevState = prevState; prevState = this.reducer(prevState, diff.action); this.diffStack.unshift(diff); }); this.setState(prevState); } /** * Utility function to get all the diffs since * a specific timestamp */ private getAllDiffsSince(timestamp: number): Diff<Data, Action>[] { if (this.diffStack.length === 0) { return []; } const diffs: Diff<Data, Action>[] = []; let [cur, ...rest] = this.diffStack; while (cur.timestamp > timestamp) { diffs.push(cur); [cur, ...rest] = rest; } return diffs; } /** * Figure out what diffs need to be applied to all * the connections */ private
type: "sendUpdate";
random_line_split
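The WebRTCDatabase rows lean on a sortDiffLeastToGreatest helper and a Diff record carrying id, timestamp, action and prevState, while the commented-out comparator at the top of the file orders the other way (newest first). As a language-neutral illustration of the ordering the class assumes — sketched in Python, since the exact Diff type lives elsewhere in the repo and its shape is inferred here — oldest-first ordering is just a sort on the timestamp:

from dataclasses import dataclass
from typing import Any

@dataclass
class Diff:
    id: str
    timestamp: float        # Date.now()-style milliseconds when the action happened
    action: Any             # the reducer action that produced this diff
    prev_state: Any = None  # state captured just before the action was applied

def sort_diffs_least_to_greatest(diffs):
    """Oldest diff first: the order in which actions must be replayed."""
    return sorted(diffs, key=lambda d: d.timestamp)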
WebRTCDatabase.ts
object, // D extends Diff<Data, Action> = Diff<Data, Action> // >(a: D, b: D) { // if (a.timestamp < b.timestamp) { // return 1; // } else if (a.timestamp > b.timestamp) { // return -1; // } else { // return 0; // } // } interface BaseDBMessage { type: string; } interface SendUpdate<Data extends object, Action extends object> extends BaseDBMessage { type: "sendUpdate"; updates: Diff<Data, Action>[]; } interface SendInternals<Data extends object, Action extends object> { type: "sendInternals"; connectionIDs: string[]; diffStack: Diff<Data, Action>[]; } interface RequestInternals { type: "requestInternals"; } interface SendConnections { type: "sendConnections"; connections: string[]; } type Message<Data extends object, Action extends object> = | SendUpdate<Data, Action> | SendInternals<Data, Action> | RequestInternals | SendConnections; type OnConnectCB = (conn: Conn) => void; type OnDisconnectCB = (conn: Conn) => void; type OnChangeCB<Data extends object> = (data: Data) => void; export class WebRTCDatabase<Data extends object, Action extends object> { private peer: PeerJS; private state: Data = {} as Data; private connections: Map<string, Conn> = new Map(); private diffStack: Diff<Data, Action>[] = []; private diffLookup: Set<string> = new Set(); private reducer: (data: Data, action: Action) => Data; private onConnectCallbacks: Set<OnConnectCB> = new Set(); private onDisconnectCallbacks: Set<OnDisconnectCB> = new Set(); private onChangeCallbacks: Set<OnChangeCB<Data>> = new Set(); public get id(): string { return this.peer.id; } constructor( initialState: Data, reducer: (data: Data, action: Action) => Data, peer: PeerJS = new PeerJS() ) { this.peer = peer; this.peer.on("connection", (conn) => { this.setupConnection(conn); }); this.reducer = reducer; this.setState(initialState); } /** * A function for managing state updates, so we can make sure * we're grouping all necessary procedures */ private setState(state: Data) { this.state = state; this.onChangeCallbacks.forEach((cb) => { cb(state); }); } /** * this is a utility function so that we can share logic for * setting up connections */ private async setupConnection(conn: DataConnection): Promise<Conn> { return new Promise<Conn>((resolve, reject) => { // setup listeners conn.on("data", async (data) => { await this.setupMessageListener( conn.peer, data as Message<Data, Action> ); }); conn.on("error", (err) => { reject(err); }); conn.on("close", () => { console.log(`See ya ${conn.peer}`); const fullConn = this.connections.get(conn.peer) as Conn; this.onDisconnectCallbacks.forEach((cb) => { cb(fullConn); }); this.connections.delete(conn.peer); }); // add this to our list of connections const fullConn = { lastUpdated: null, connection: conn, }; this.connections.set(conn.peer, fullConn); this.onConnectCallbacks.forEach((cb) => { cb(fullConn); }); resolve(fullConn); }); } private async setupMessageListener( id: string, message: Message<Data, Action> ) { switch (message.type) { case "sendUpdate": { // this should be an array of diffs in chronological order const diffs = message.updates; // filter out diffs that already exist const filtered = diffs.filter((diff) => !this.diffLookup.has(diff.id)); if (filtered.length > 0) { // add these new diffs to the lookup filtered.forEach((diff) => { this.diffLookup.add(diff.id); }); // apply only the "new" diffs this.applyDiffs(filtered); } break; } case "requestInternals": { if (this.connections.has(id)) { const conn = this.connections.get(id) as Conn; const connectionIDs = Array.from(this.connections.keys()); 
const diffStack = this.diffStack; this.message(conn, { type: "sendInternals", connectionIDs, diffStack, }); } break; } case "sendInternals": { // connect to all the ids passed const connectPromises = message.connectionIDs .filter((id) => id !== this.id) .map((id) => this.connect(id)); await Promise.all(connectPromises); // update the state this.applyDiffs(message.diffStack); message.diffStack.forEach((diff) => { this.diffLookup.add(diff.id); }); break; } case "sendConnections": { const connPromises = message.connections .filter((id) => !this.connections.has(id)) .map((id) => this.connect(id)); await Promise.all(connPromises); } } } /** * Utility function for getting the diffs to apply if * there are no diffs in the diff stack. Used as part of * `applyDiffs` */ private
( diffs: Diff<Data, Action>[] ): Diff<Data, Action>[] { return diffs.sort(sortDiffLeastToGreatest); } /** * Utility function for getting diffs if there is at least * one diff in the diff stack. Used as part of `applyDiffs` */ private getDiffsToApplyMany( diffs: Diff<Data, Action>[] ): Diff<Data, Action>[] { // first sort the diffs we want to apply in order of // least-recent-to-most-recent. This way, when we start // reconstructing the diffStack, we only have to rewind // the state in one big update method, rather than for // each diff diffs.sort(sortDiffLeastToGreatest); // here we construct a list of diffs to apply. We take the head // of the sorted diffs and pop off diffs in the stack until we // find an entry with a timestamp that's less than the head. // Once we have that base list, we can add the rest of our // timestamps and sort them from least to greatest const [head, ...rest] = diffs; const diffsToApply: Diff<Data, Action>[] = []; while ( this.diffStack.length > 0 && this.diffStack[0].timestamp > head.timestamp ) { const entry = this.diffStack.shift() as Diff<Data, Action>; /** * @TODO - Maybe find a safer way to do this that doesn't involve * mutating state within this function? A better solution may be * to create a "getRewoundState" method that finds the state * before a given timestamp * * ~reccanti 7/5/2021 */ this.state = entry.prevState; diffsToApply.push(entry); } diffsToApply.push(head); const sortedDiffsToApply = diffsToApply .concat(...rest) .sort(sortDiffLeastToGreatest); return sortedDiffsToApply; } /** * In the event that we get a batch of diffs that occurred * out-of-order, this function figures out how to reverse * the state and apply diffs sequentially */ private applyDiffs(diffs: Diff<Data, Action>[]) { // const filtered = diffs.filter((diff) => !this.diffLookup.has(diff.id)); // get the number of diffs we need to apply to the diff stack let diffsToApply: Diff<Data, Action>[] = []; if (this.diffStack.length === 0) { diffsToApply = this.getDiffsToApplyEmpty(diffs); } else { diffsToApply = this.getDiffsToApplyMany(diffs); } // now that we have a list of diffs to apply: // 1. cycle through them and apply the action to the reducer // 2. add the diff back to the diffStack let prevState = this.getState(); diffsToApply.forEach((diff) => { diff.prevState = prevState; prevState = this.reducer(prevState, diff.action); this.diffStack.unshift(diff); }); this.setState(prevState); } /** * Utility function to get all the diffs since * a specific timestamp */ private getAllDiffsSince(timestamp: number): Diff<Data, Action>[] { if (this.diffStack.length === 0) { return []; } const diffs: Diff<Data, Action>[] = []; let [cur, ...rest] = this.diffStack; while (cur.timestamp > timestamp) { diffs.push(cur); [cur, ...rest] = rest; } return diffs; } /** * Figure out what diffs need to be applied to all * the connections */
getDiffsToApplyEmpty
identifier_name
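getDiffsToApplyMany rewinds the newest-first diff stack until it reaches an entry no newer than the oldest incoming diff, restoring this.state from each popped entry's prevState as it goes; the author's TODO asks for a non-mutating getRewoundState instead. A small Python sketch of that alternative, assuming the Diff shape sketched earlier and a newest-first stack:

def get_rewound_state(diff_stack, current_state, timestamp):
    """Return (state_before_timestamp, popped_entries) without touching the inputs.

    diff_stack is ordered newest-first; each entry carries the state recorded
    just before its action was applied (prev_state).
    """
    state = current_state
    popped = []
    for entry in diff_stack:
        if entry.timestamp <= timestamp:
            break
        # this entry happened after `timestamp`, so undo it
        state = entry.prev_state
        popped.append(entry)
    return state, popped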
WebRTCDatabase.ts
object, // D extends Diff<Data, Action> = Diff<Data, Action> // >(a: D, b: D) { // if (a.timestamp < b.timestamp) { // return 1; // } else if (a.timestamp > b.timestamp) { // return -1; // } else { // return 0; // } // } interface BaseDBMessage { type: string; } interface SendUpdate<Data extends object, Action extends object> extends BaseDBMessage { type: "sendUpdate"; updates: Diff<Data, Action>[]; } interface SendInternals<Data extends object, Action extends object> { type: "sendInternals"; connectionIDs: string[]; diffStack: Diff<Data, Action>[]; } interface RequestInternals { type: "requestInternals"; } interface SendConnections { type: "sendConnections"; connections: string[]; } type Message<Data extends object, Action extends object> = | SendUpdate<Data, Action> | SendInternals<Data, Action> | RequestInternals | SendConnections; type OnConnectCB = (conn: Conn) => void; type OnDisconnectCB = (conn: Conn) => void; type OnChangeCB<Data extends object> = (data: Data) => void; export class WebRTCDatabase<Data extends object, Action extends object> { private peer: PeerJS; private state: Data = {} as Data; private connections: Map<string, Conn> = new Map(); private diffStack: Diff<Data, Action>[] = []; private diffLookup: Set<string> = new Set(); private reducer: (data: Data, action: Action) => Data; private onConnectCallbacks: Set<OnConnectCB> = new Set(); private onDisconnectCallbacks: Set<OnDisconnectCB> = new Set(); private onChangeCallbacks: Set<OnChangeCB<Data>> = new Set(); public get id(): string { return this.peer.id; } constructor( initialState: Data, reducer: (data: Data, action: Action) => Data, peer: PeerJS = new PeerJS() ) { this.peer = peer; this.peer.on("connection", (conn) => { this.setupConnection(conn); }); this.reducer = reducer; this.setState(initialState); } /** * A function for managing state updates, so we can make sure * we're grouping all necessary procedures */ private setState(state: Data) { this.state = state; this.onChangeCallbacks.forEach((cb) => { cb(state); }); } /** * this is a utility function so that we can share logic for * setting up connections */ private async setupConnection(conn: DataConnection): Promise<Conn> { return new Promise<Conn>((resolve, reject) => { // setup listeners conn.on("data", async (data) => { await this.setupMessageListener( conn.peer, data as Message<Data, Action> ); }); conn.on("error", (err) => { reject(err); }); conn.on("close", () => { console.log(`See ya ${conn.peer}`); const fullConn = this.connections.get(conn.peer) as Conn; this.onDisconnectCallbacks.forEach((cb) => { cb(fullConn); }); this.connections.delete(conn.peer); }); // add this to our list of connections const fullConn = { lastUpdated: null, connection: conn, }; this.connections.set(conn.peer, fullConn); this.onConnectCallbacks.forEach((cb) => { cb(fullConn); }); resolve(fullConn); }); } private async setupMessageListener( id: string, message: Message<Data, Action> ) { switch (message.type) { case "sendUpdate": { // this should be an array of diffs in chronological order const diffs = message.updates; // filter out diffs that already exist const filtered = diffs.filter((diff) => !this.diffLookup.has(diff.id)); if (filtered.length > 0) { // add these new diffs to the lookup filtered.forEach((diff) => { this.diffLookup.add(diff.id); }); // apply only the "new" diffs this.applyDiffs(filtered); } break; } case "requestInternals": { if (this.connections.has(id)) { const conn = this.connections.get(id) as Conn; const connectionIDs = Array.from(this.connections.keys()); 
const diffStack = this.diffStack; this.message(conn, { type: "sendInternals", connectionIDs, diffStack, }); } break; } case "sendInternals": { // connect to all the ids passed const connectPromises = message.connectionIDs .filter((id) => id !== this.id) .map((id) => this.connect(id)); await Promise.all(connectPromises); // update the state this.applyDiffs(message.diffStack); message.diffStack.forEach((diff) => { this.diffLookup.add(diff.id); }); break; } case "sendConnections": { const connPromises = message.connections .filter((id) => !this.connections.has(id)) .map((id) => this.connect(id)); await Promise.all(connPromises); } } } /** * Utility function for getting the diffs to apply if * there are no diffs in the diff stack. Used as part of * `applyDiffs` */ private getDiffsToApplyEmpty( diffs: Diff<Data, Action>[] ): Diff<Data, Action>[] { return diffs.sort(sortDiffLeastToGreatest); } /** * Utility function for getting diffs if there is at least * one diff in the diff stack. Used as part of `applyDiffs` */ private getDiffsToApplyMany( diffs: Diff<Data, Action>[] ): Diff<Data, Action>[] { // first sort the diffs we want to apply in order of // least-recent-to-most-recent. This way, when we start // reconstructing the diffStack, we only have to rewind // the state in one big update method, rather than for // each diff diffs.sort(sortDiffLeastToGreatest); // here we construct a list of diffs to apply. We take the head // of the sorted diffs and pop off diffs in the stack until we // find an entry with a timestamp that's less than the head. // Once we have that base list, we can add the rest of our // timestamps and sort them from least to greatest const [head, ...rest] = diffs; const diffsToApply: Diff<Data, Action>[] = []; while ( this.diffStack.length > 0 && this.diffStack[0].timestamp > head.timestamp ) { const entry = this.diffStack.shift() as Diff<Data, Action>; /** * @TODO - Maybe find a safer way to do this that doesn't involve * mutating state within this function? A better solution may be * to create a "getRewoundState" method that finds the state * before a given timestamp * * ~reccanti 7/5/2021 */ this.state = entry.prevState; diffsToApply.push(entry); } diffsToApply.push(head); const sortedDiffsToApply = diffsToApply .concat(...rest) .sort(sortDiffLeastToGreatest); return sortedDiffsToApply; } /** * In the event that we get a batch of diffs that occurred * out-of-order, this function figures out how to reverse * the state and apply diffs sequentially */ private applyDiffs(diffs: Diff<Data, Action>[]) { // const filtered = diffs.filter((diff) => !this.diffLookup.has(diff.id)); // get the number of diffs we need to apply to the diff stack let diffsToApply: Diff<Data, Action>[] = []; if (this.diffStack.length === 0)
else { diffsToApply = this.getDiffsToApplyMany(diffs); } // now that we have a list of diffs to apply: // 1. cycle through them and apply the action to the reducer // 2. add the diff back to the diffStack let prevState = this.getState(); diffsToApply.forEach((diff) => { diff.prevState = prevState; prevState = this.reducer(prevState, diff.action); this.diffStack.unshift(diff); }); this.setState(prevState); } /** * Utility function to get all the diffs since * a specific timestamp */ private getAllDiffsSince(timestamp: number): Diff<Data, Action>[] { if (this.diffStack.length === 0) { return []; } const diffs: Diff<Data, Action>[] = []; let [cur, ...rest] = this.diffStack; while (cur.timestamp > timestamp) { diffs.push(cur); [cur, ...rest] = rest; } return diffs; } /** * Figure out what diffs need to be applied to all * the connections */
{ diffsToApply = this.getDiffsToApplyEmpty(diffs); }
conditional_block
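Once the diffs to apply are known, applyDiffs replays them oldest-first through the reducer, stamping each diff with the state seen just before its action and unshifting it back onto the stack. A compact Python sketch of that replay loop, where reducer and diff_stack are stand-ins for the class fields:

def replay_diffs(state, diffs_to_apply, reducer, diff_stack):
    """Apply diffs oldest-first and return the resulting state.

    diff_stack stays newest-first, so every applied diff is inserted at the front.
    """
    prev_state = state
    for diff in sorted(diffs_to_apply, key=lambda d: d.timestamp):
        diff.prev_state = prev_state                 # remember the state before this action
        prev_state = reducer(prev_state, diff.action)
        diff_stack.insert(0, diff)                   # equivalent of Array.prototype.unshift
    return prev_state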
WebRTCDatabase.ts
BaseDBMessage { type: string; } interface SendUpdate<Data extends object, Action extends object> extends BaseDBMessage { type: "sendUpdate"; updates: Diff<Data, Action>[]; } interface SendInternals<Data extends object, Action extends object> { type: "sendInternals"; connectionIDs: string[]; diffStack: Diff<Data, Action>[]; } interface RequestInternals { type: "requestInternals"; } interface SendConnections { type: "sendConnections"; connections: string[]; } type Message<Data extends object, Action extends object> = | SendUpdate<Data, Action> | SendInternals<Data, Action> | RequestInternals | SendConnections; type OnConnectCB = (conn: Conn) => void; type OnDisconnectCB = (conn: Conn) => void; type OnChangeCB<Data extends object> = (data: Data) => void; export class WebRTCDatabase<Data extends object, Action extends object> { private peer: PeerJS; private state: Data = {} as Data; private connections: Map<string, Conn> = new Map(); private diffStack: Diff<Data, Action>[] = []; private diffLookup: Set<string> = new Set(); private reducer: (data: Data, action: Action) => Data; private onConnectCallbacks: Set<OnConnectCB> = new Set(); private onDisconnectCallbacks: Set<OnDisconnectCB> = new Set(); private onChangeCallbacks: Set<OnChangeCB<Data>> = new Set(); public get id(): string { return this.peer.id; } constructor( initialState: Data, reducer: (data: Data, action: Action) => Data, peer: PeerJS = new PeerJS() ) { this.peer = peer; this.peer.on("connection", (conn) => { this.setupConnection(conn); }); this.reducer = reducer; this.setState(initialState); } /** * A function for managing state updates, so we can make sure * we're grouping all necessary procedures */ private setState(state: Data) { this.state = state; this.onChangeCallbacks.forEach((cb) => { cb(state); }); } /** * this is a utility function so that we can share logic for * setting up connections */ private async setupConnection(conn: DataConnection): Promise<Conn> { return new Promise<Conn>((resolve, reject) => { // setup listeners conn.on("data", async (data) => { await this.setupMessageListener( conn.peer, data as Message<Data, Action> ); }); conn.on("error", (err) => { reject(err); }); conn.on("close", () => { console.log(`See ya ${conn.peer}`); const fullConn = this.connections.get(conn.peer) as Conn; this.onDisconnectCallbacks.forEach((cb) => { cb(fullConn); }); this.connections.delete(conn.peer); }); // add this to our list of connections const fullConn = { lastUpdated: null, connection: conn, }; this.connections.set(conn.peer, fullConn); this.onConnectCallbacks.forEach((cb) => { cb(fullConn); }); resolve(fullConn); }); } private async setupMessageListener( id: string, message: Message<Data, Action> ) { switch (message.type) { case "sendUpdate": { // this should be an array of diffs in chronological order const diffs = message.updates; // filter out diffs that already exist const filtered = diffs.filter((diff) => !this.diffLookup.has(diff.id)); if (filtered.length > 0) { // add these new diffs to the lookup filtered.forEach((diff) => { this.diffLookup.add(diff.id); }); // apply only the "new" diffs this.applyDiffs(filtered); } break; } case "requestInternals": { if (this.connections.has(id)) { const conn = this.connections.get(id) as Conn; const connectionIDs = Array.from(this.connections.keys()); const diffStack = this.diffStack; this.message(conn, { type: "sendInternals", connectionIDs, diffStack, }); } break; } case "sendInternals": { // connect to all the ids passed const connectPromises = message.connectionIDs 
.filter((id) => id !== this.id) .map((id) => this.connect(id)); await Promise.all(connectPromises); // update the state this.applyDiffs(message.diffStack); message.diffStack.forEach((diff) => { this.diffLookup.add(diff.id); }); break; } case "sendConnections": { const connPromises = message.connections .filter((id) => !this.connections.has(id)) .map((id) => this.connect(id)); await Promise.all(connPromises); } } } /** * Utility function for getting the diffs to apply if * there are no diffs in the diff stack. Used as part of * `applyDiffs` */ private getDiffsToApplyEmpty( diffs: Diff<Data, Action>[] ): Diff<Data, Action>[] { return diffs.sort(sortDiffLeastToGreatest); } /** * Utility function for getting diffs if there is at least * one diff in the diff stack. Used as part of `applyDiffs` */ private getDiffsToApplyMany( diffs: Diff<Data, Action>[] ): Diff<Data, Action>[] { // first sort the diffs we want to apply in order of // least-recent-to-most-recent. This way, when we start // reconstructing the diffStack, we only have to rewind // the state in one big update method, rather than for // each diff diffs.sort(sortDiffLeastToGreatest); // here we construct a list of diffs to apply. We take the head // of the sorted diffs and pop off diffs in the stack until we // find an entry with a timestamp that's less than the head. // Once we have that base list, we can add the rest of our // timestamps and sort them from least to greatest const [head, ...rest] = diffs; const diffsToApply: Diff<Data, Action>[] = []; while ( this.diffStack.length > 0 && this.diffStack[0].timestamp > head.timestamp ) { const entry = this.diffStack.shift() as Diff<Data, Action>; /** * @TODO - Maybe find a safer way to do this that doesn't involve * mutating state within this function? A better solution may be * to create a "getRewoundState" method that finds the state * before a given timestamp * * ~reccanti 7/5/2021 */ this.state = entry.prevState; diffsToApply.push(entry); } diffsToApply.push(head); const sortedDiffsToApply = diffsToApply .concat(...rest) .sort(sortDiffLeastToGreatest); return sortedDiffsToApply; } /** * In the event that we get a batch of diffs that occurred * out-of-order, this function figures out how to reverse * the state and apply diffs sequentially */ private applyDiffs(diffs: Diff<Data, Action>[]) { // const filtered = diffs.filter((diff) => !this.diffLookup.has(diff.id)); // get the number of diffs we need to apply to the diff stack let diffsToApply: Diff<Data, Action>[] = []; if (this.diffStack.length === 0) { diffsToApply = this.getDiffsToApplyEmpty(diffs); } else { diffsToApply = this.getDiffsToApplyMany(diffs); } // now that we have a list of diffs to apply: // 1. cycle through them and apply the action to the reducer // 2. add the diff back to the diffStack let prevState = this.getState(); diffsToApply.forEach((diff) => { diff.prevState = prevState; prevState = this.reducer(prevState, diff.action); this.diffStack.unshift(diff); }); this.setState(prevState); } /** * Utility function to get all the diffs since * a specific timestamp */ private getAllDiffsSince(timestamp: number): Diff<Data, Action>[] { if (this.diffStack.length === 0) { return []; } const diffs: Diff<Data, Action>[] = []; let [cur, ...rest] = this.diffStack; while (cur.timestamp > timestamp) { diffs.push(cur); [cur, ...rest] = rest; } return diffs; } /** * Figure out what diffs need to be applied to all * the connections */ private syncConnections()
{ this.connections.forEach((conn) => { let diffs: Diff<Data, Action>[] = []; if (conn.lastUpdated) { diffs = this.getAllDiffsSince(conn.lastUpdated); } else { diffs = [...this.diffStack].reverse(); } this.message(conn, { type: "sendUpdate", updates: diffs }); }); }
identifier_body
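syncConnections (the identifier_body above) sends each peer only what it is missing: the diffs newer than its lastUpdated timestamp, or the whole stack reversed to oldest-first if it has never synced. The selection logic, sketched in Python with get_all_diffs_since standing in for the class helper of the same name:

def diffs_for_connection(last_updated, diff_stack, get_all_diffs_since):
    """Choose the sendUpdate payload for one peer (diff_stack is newest-first)."""
    if last_updated is not None:
        return get_all_diffs_since(last_updated)
    # a brand-new peer gets the full history, oldest diff first, so it can replay from scratch
    return list(reversed(diff_stack))

Each payload then goes out as a sendUpdate message, mirroring the forEach over connections in the original method.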
john64.py
there, save that one whereinto his disciples were entered, and that Jesus went not with his disciples into the boat, but that his disciples were gone away alone; 23 (Howbeit there came other boats from Tiberias nigh unto the place where they did eat bread, after that the Lord had given thanks:) 24 When the people therefore saw that Jesus was not there, neither his disciples, they also took shipping, and came to Capernaum, seeking for Jesus. 25 And when they had found him on the other side of the sea, they said unto him, Rabbi, when camest thou hither? 26 Jesus answered them and said, Verily, verily, I say unto you, Ye seek me, not because ye saw the miracles, but because ye did eat of the loaves, and were filled. 27 Labour not for the meat which perisheth, but for that meat which endureth unto everlasting life, which the Son of man shall give unto you: for him hath God the Father sealed. 28 Then said they unto him, What shall we do, that we might work the works of God? 29 Jesus answered and said unto them, This is the work of God, that ye believe on him whom he hath sent. 30 They said therefore unto him, What sign shewest thou then, that we may see, and believe thee? what dost thou work? 31 Our fathers did eat manna in the desert; as it is written, He gave them bread from heaven to eat. 32 Then Jesus said unto them, Verily, verily, I say unto you, Moses gave you not that bread from heaven; but my Father giveth you the true bread from heaven. 33 For the bread of God is he which cometh down from heaven, and giveth life unto the world. 34 Then said they unto him, Lord, evermore give us this bread. 35 And Jesus said unto them, I am the bread of life: he that cometh to me shall never hunger; and he that believeth on me shall never thirst. 36 But I said unto you, That ye also have seen me, and believe not. 37 All that the Father giveth me shall come to me; and him that cometh to me I will in no wise cast out. 38 For I came down from heaven, not to do mine own will, but the will of him that sent me. 39 And this is the Father's will which hath sent me, that of all which he hath given me I should lose nothing, but should raise it up again at the last day. 40 And this is the will of him that sent me, that every one which seeth the Son, and believeth on him, may have everlasting life: and I will raise him up at the last day. 41 The Jews then murmured at him, because he said, I am the bread which came down from heaven. 42 And they said, Is not this Jesus, the son of Joseph, whose father and mother we know? how is it then that he saith, I came down from heaven? 43 Jesus therefore answered and said unto them, Murmur not among yourselves. 44 No man can come to me, except the Father which hath sent me draw him: and I will raise him up at the last day. 45 It is written in the prophets, And they shall be all taught of God. Every man therefore that hath heard, and hath learned of the Father, cometh unto me. 46 Not that any man hath seen the Father, save he which is of God, he hath seen the Father. 47 Verily, verily, I say unto you, He that believeth on me hath everlasting life. 48 I am that bread of life. 49 Your fathers did eat manna in the wilderness, and are dead. 50 This is the bread which cometh down from heaven, that a man may eat thereof, and not die. 51 I am the living bread which came down from heaven: if any man eat of this bread, he shall live for ever: and the bread that I will give is my flesh, which I will give for the life of the world. 
52 The Jews therefore strove among themselves, saying, How can this man give us his flesh to eat? 53 Then Jesus said unto them, Verily, verily, I say unto you, Except ye eat the flesh of the Son of man, and drink his blood, ye have no life in you. 54 Whoso eateth my flesh, and drinketh my blood, hath eternal life; and I will raise him up at the last day. 55 For my flesh is meat indeed, and my blood is drink indeed. 56 He that eateth my flesh, and drinketh my blood, dwelleth in me, and I in him. 57 As the living Father hath sent me, and I live by the Father: so he that eateth me, even he shall live by me. 58 This is that bread which came down from heaven: not as your fathers did eat manna, and are dead: he that eateth of this bread shall live for ever. 59 These things said he in the synagogue, as he taught in Capernaum. 60 Many therefore of his disciples, when they had heard this, said, This is an hard saying; who can hear it? 61 When Jesus knew in himself that his disciples murmured at it, he said unto them, Doth this offend you? 62 What and if ye shall see the Son of man ascend up where he was before? 63 It is the spirit that quickeneth; the flesh profiteth nothing: the words that I speak unto you, they are spirit, and they are life. 64 But there are some of you that believe not. For Jesus knew from the beginning who they were that believed not, and who should betray him. 65 And he said, Therefore said I unto you, that no man can come unto me, except it were given unto him of my Father. 66 From that time many of his disciples went back, and walked no more with him. 67 Then said Jesus unto the twelve, Will ye also go away? 68 Then Simon Peter answered him, Lord, to whom shall we go? thou hast the words of eternal life. 69 And we believe and are sure that thou art that Christ, the Son of the living God. 70 Jesus answered them, Have not I chosen you twelve, and one of you is a devil? 71 He spake of Judas Iscariot the son of Simon: for he it was that should betray him, being one of the twelve. >>> John[6:10].vn() 26268 >>> John.vn() 26046 >>> 26268-26046+1 223 >>> pf(223) Counter({223: 1}) >>> np(223) 48 >>> John.vc() 879 >>> (Matthew-John).vc() 3779 >>> pf(_) Counter({3779: 1}) >>> np(3779) 526 >>> John[6:4].vn() 26262 >>> John[1:1]-John[6:4] John 1:1-6:4 (217 verses) >>> Matthew-John[6:4] Matthew 1:1-John 6:4 (3117 verses) >>> pf(26262) Counter({3: 2, 2: 1, 1459: 1}) >>> >>> b/'five'/'loaves' 1 Samuel 21:3;25:18;Matthew 14:17,19;16:9;Mark 6:38,41,44;8:19;Luke 9:13,16;John 6:9,13 (13 verses) >>> John[6:9].vn() 26267 >>> ISamuel[21:3].vn() 7776 >>> 26267-7776 18491 >>> p(pf(_)) Counter({41: 2, 11: 1}) >>> >>> _+4 18495 >>> John[7] John 7:1-53 (53 verses) >>> p(_) John 7 1 After these things Jesus walked in Galilee: for he would not walk in Jewry, because the Jews sought to kill him. 2 Now the Jew's feast of tabernacles was at hand. 3 His brethren therefore said unto him, Depart hence, and go into Judaea, that thy disciples also may see the works that thou doest. 4 For there is no man that doeth any thing in secret, and he himself seeketh to be known openly. If thou do these things, shew thyself to the world. 5 For neither did his brethren believe in him. 6 Then Jesus said unto them, My time is not yet come: but your time is alway ready. 7 The world cannot hate you; but me it hateth, because I testify of it, that the works thereof are evil. 8 Go ye
them, It is I; be not afraid. 21 Then they willingly received him into the ship: and immediately the ship was at the land whither they went. 22 The day following, when the people which stood on the other side of the sea saw that there was none other boat
conditional_block
john64.py
26268-26046+1 223 >>> pf(223) Counter({223: 1}) >>> np(223) 48 >>> John.vc() 879 >>> (Matthew-John).vc() 3779 >>> pf(_) Counter({3779: 1}) >>> np(3779) 526 >>> John[6:4].vn() 26262 >>> John[1:1]-John[6:4] John 1:1-6:4 (217 verses) >>> Matthew-John[6:4] Matthew 1:1-John 6:4 (3117 verses) >>> pf(26262) Counter({3: 2, 2: 1, 1459: 1}) >>> >>> b/'five'/'loaves' 1 Samuel 21:3;25:18;Matthew 14:17,19;16:9;Mark 6:38,41,44;8:19;Luke 9:13,16;John 6:9,13 (13 verses) >>> John[6:9].vn() 26267 >>> ISamuel[21:3].vn() 7776 >>> 26267-7776 18491 >>> p(pf(_)) Counter({41: 2, 11: 1}) >>> >>> _+4 18495 >>> John[7] John 7:1-53 (53 verses) >>> p(_) John 7 1 After these things Jesus walked in Galilee: for he would not walk in Jewry, because the Jews sought to kill him. 2 Now the Jew's feast of tabernacles was at hand. 3 His brethren therefore said unto him, Depart hence, and go into Judaea, that thy disciples also may see the works that thou doest. 4 For there is no man that doeth any thing in secret, and he himself seeketh to be known openly. If thou do these things, shew thyself to the world. 5 For neither did his brethren believe in him. 6 Then Jesus said unto them, My time is not yet come: but your time is alway ready. 7 The world cannot hate you; but me it hateth, because I testify of it, that the works thereof are evil. 8 Go ye up unto this feast: I go not up yet unto this feast: for my time is not yet full come. 9 When he had said these words unto them, he abode still in Galilee. 10 But when his brethren were gone up, then went he also up unto the feast, not openly, but as it were in secret. 11 Then the Jews sought him at the feast, and said, Where is he? 12 And there was much murmuring among the people concerning him: for some said, He is a good man: others said, Nay; but he deceiveth the people. 13 Howbeit no man spake openly of him for fear of the Jews. 14 Now about the midst of the feast Jesus went up into the temple, and taught. 15 And the Jews marvelled, saying, How knoweth this man letters, having never learned? 16 Jesus answered them, and said, My doctrine is not mine, but his that sent me. 17 If any man will do his will, he shall know of the doctrine, whether it be of God, or whether I speak of myself. 18 He that speaketh of himself seeketh his own glory: but he that seeketh his glory that sent him, the same is true, and no unrighteousness is in him. 19 Did not Moses give you the law, and yet none of you keepeth the law? Why go ye about to kill me? 20 The people answered and said, Thou hast a devil: who goeth about to kill thee? 21 Jesus answered and said unto them, I have done one work, and ye all marvel. 22 Moses therefore gave unto you circumcision; (not because it is of Moses, but of the fathers;) and ye on the sabbath day circumcise a man. 23 If a man on the sabbath day receive circumcision, that the law of Moses should not be broken; are ye angry at me, because I have made a man every whit whole on the sabbath day? 24 Judge not according to the appearance, but judge righteous judgment. 25 Then said some of them of Jerusalem, Is not this he, whom they seek to kill? 26 But, lo, he speaketh boldly, and they say nothing unto him. Do the rulers know indeed that this is the very Christ? 27 Howbeit we know this man whence he is: but when Christ cometh, no man knoweth whence he is. 28 Then cried Jesus in the temple as he taught, saying, Ye both know me, and ye know whence I am: and I am not come of myself, but he that sent me is true, whom ye know not. 
29 But I know him: for I am from him, and he hath sent me. 30 Then they sought to take him: but no man laid hands on him, because his hour was not yet come. 31 And many of the people believed on him, and said, When Christ cometh, will he do more miracles than these which this man hath done? 32 The Pharisees heard that the people murmured such things concerning him; and the Pharisees and the chief priests sent officers to take him. 33 Then said Jesus unto them, Yet a little while am I with you, and then I go unto him that sent me. 34 Ye shall seek me, and shall not find me: and where I am, thither ye cannot come. 35 Then said the Jews among themselves, Whither will he go, that we shall not find him? will he go unto the dispersed among the Gentiles, and teach the Gentiles? 36 What manner of saying is this that he said, Ye shall seek me, and shall not find me: and where I am, thither ye cannot come? 37 In the last day, that great day of the feast, Jesus stood and cried, saying, If any man thirst, let him come unto me, and drink. 38 He that believeth on me, as the scripture hath said, out of his belly shall flow rivers of living water. 39 (But this spake he of the Spirit, which they that believe on him should receive: for the Holy Ghost was not yet given; because that Jesus was not yet glorified.) 40 Many of the people therefore, when they heard this saying, said, Of a truth this is the Prophet. 41 Others said, This is the Christ. But some said, Shall Christ come out of Galilee? 42 Hath not the scripture said, That Christ cometh of the seed of David, and out of the town of Bethlehem, where David was? 43 So there was a division among the people because of him. 44 And some of them would have taken him; but no man laid hands on him. 45 Then came the officers to the chief priests and Pharisees; and they said unto them, Why have ye not brought him? 46 The officers answered, Never man spake like this man. 47 Then answered them the Pharisees, Are ye also deceived? 48 Have any of the rulers or of the Pharisees believed on him? 49 But this people who knoweth not the law are cursed. 50 Nicodemus saith unto them, (he that came to Jesus by night, being one of them,) 51 Doth our law judge any man, before it hear him, and know what he doeth? 52 They answered and said unto him, Art thou also of Galilee? Search, and look: for out of Galilee ariseth no prophet. 53 And every man went unto his own house.
>>> b/'passover' Exodus 12:11,21,27,43,48;34:25;Leviticus 23:5;Numbers 9:2,4-6,10,12-14;28:16;33:3;Deuteronomy 16:1-2,5-6;Joshua 5:10-11;2 Kings 23:21-23;2 Chronicles 30:1-2,5,15,17-18;35:1,6-9,11,13,16-19;Ezra 6:19-20;Ezekiel 45:21;Matthew 26:2,17-19;Mark 14:1,12,14,16;Luke 2:41;22:1,7-8,11,13,15;John 2:13,23;6:4;11:55;12:1;13:1;18:28,39;19:14;1 Corinthians 5:7;Hebrews 11:28 (72 verses) >>> b.count('passover') 77 >>> b.count('pass over')
random_line_split
label_tracking.py
""" self.stopped = True class LabelTracker: def __init__(self, camera_index, trackerType, webCam, video=None): self.tracker = None # initialize the bounding box coordinates of the object we are going to track self.initBB = None self.vs = None # initialize the FPS throughput estimator self.fps = None self.trackerType = trackerType self.webCam = webCam self.video = video self.camera_index = camera_index self.video_stream = CameraVideoStream(camera_index) def trackLabel(self, label, debug=False): """ Sets up and tracks a given label """ self.setUp(debug) return self.track(label, debug) def setUp(self, debug=False): """ Set up everything for the video stream and tracking """ # extract the OpenCV version info (major, minor) = cv2.__version__.split(".")[:2] # if we are using OpenCV 3.2 OR BEFORE, we can use a special factory # function to create our object tracker if int(major) == 3 and int(minor) < 3: self.tracker = cv2.Tracker_create(trackerType.upper()) # otherwise, for OpenCV 3.3 OR NEWER, we need to explicity call the # approrpiate object tracker constructor: else: # initialize a dictionary that maps strings to their corresponding # OpenCV object tracker implementations OPENCV_OBJECT_TRACKERS = { "csrt": cv2.TrackerCSRT_create, "kcf": cv2.TrackerKCF_create, "boosting": cv2.TrackerBoosting_create, "mil": cv2.TrackerMIL_create, "tld": cv2.TrackerTLD_create, "medianflow": cv2.TrackerMedianFlow_create, "mosse": cv2.TrackerMOSSE_create } # grab the appropriate object tracker using our dictionary of # OpenCV object tracker objects self.tracker = OPENCV_OBJECT_TRACKERS[self.trackerType]() # If using the webcam, grab the reference to the web cam if self.webCam: print("[INFO] starting video stream...") """ self.vs = VideoStream(src=self.camera_index).start() time.sleep(1.0) """ self.video_stream.start() # otherwise, grab a reference to the video file else: self.vs = cv2.VideoCapture(self.video) # Open the window if debug: cv2.namedWindow("Frame", cv2.WINDOW_AUTOSIZE) def track(self, label, debug=False): """ Takes a label and tracks it in a video or webcam stram. Displays the video with the tracked objects. 
Returns false if the label is lost """ prev_speed = 0 count_frames_speed_0 = 0 mv = MoveRobot() # loop over frames from the video stream while True: # grab the current frame, then handle if we are using a # VideoStream or VideoCapture object """ frame = self.vs.read() frame = frame[1] if not self.webCam else frame """ if count_frames_speed_0 >= 10: break frame, boundary_lines = self.video_stream.read() # check to see if we have reached the end of the stream if frame is None: break # resize the frame (so we can process it faster) and grab the # frame dimensions frame = imutils.resize(frame, width=500) if not self.webCam else frame (H, W) = frame.shape[:2] # Start tracking the given label if self.initBB is None and label is not None: self.initBB = label # start OpenCV object tracker using the supplied bounding box # coordinates, then start the FPS throughput estimator as well self.tracker.init(frame, self.initBB) self.fps = FPS().start() label = None # check to see if we are currently tracking an object if self.initBB is not None: # grab the new bounding box coordinates of the object (success, box) = self.tracker.update(frame) # check to see if the tracking was a success if success: (x, y, w, h) = [int(v) for v in box] # Get the spine boundary lines label_rectangle = Rectangle(x, y, w, h) left_spine_bound, right_spine_bound = findSpineBoundaries(label_rectangle, boundary_lines) # Plot the lines if debug and left_spine_bound: left_spine_bound.plotOnImage(frame, thickness=2) if debug and right_spine_bound: right_spine_bound.plotOnImage(frame, thickness=2) distance_to_middle = 0 # If both spine bounds are in frame and found if (right_spine_bound is not None) and (left_spine_bound is not None): # Adjust the position of the robot # Find a point on the spine boundaries which is in the middle of the frame height left_spine_coordinate = left_spine_bound.calculateXgivenY(H/2) right_spine_coordinate = right_spine_bound.calculateXgivenY(H/2) # Find a point on the middle of the spine spine_midpoint = left_spine_coordinate + (right_spine_coordinate - left_spine_coordinate) / 2 # Distance from the point on the middle of the spine to the middle of the frame # Range 100 if spine is on the very left of the frame to -100 on the right distance_to_middle = int(( (W/2 - spine_midpoint) * 100 ) / (W/2)) # If only one spine bound is found if (right_spine_bound is None) and (left_spine_bound is not None): # Distance from the point on the middle of the spine to the middle of the frame # Range 100 if spine is on the very left of the frame to -100 on the right left_spine_coordinate = left_spine_bound.calculateXgivenY(H/2) distance_to_middle = int(( (W/2 - left_spine_coordinate) * 100 ) / (W/2)) if (right_spine_bound is not None) and (left_spine_bound is None): # Distance from the point on the middle of the spine to the middle of the frame # Range 100 if spine is on the very left of the frame to -100 on the right right_spine_coordinate = right_spine_bound.calculateXgivenY(H/2) distance_to_middle = int(( (W/2 - right_spine_coordinate) * 100 ) / (W/2)) if (right_spine_bound is not None) or (left_spine_bound is not None): if abs(distance_to_middle < 20): abs_speed = 0.005 else: abs_speed = 0.001 if abs(distance_to_middle) < 5: speed = 0 count_frames_speed_0 += 1 elif distance_to_middle < 0: speed = abs_speed count_frames_speed_0 = 0 else: speed = -abs_speed count_frames_speed_0 = 0 if speed != prev_speed: print("Moving with speed " + str(speed) + " !") Thread(target=mv.setSpeed, args=(speed,)).start() prev_speed = speed # 
Draw the rectangle around the label if debug: cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2) #else: #return success # update the FPS counter self.fps.update() self.fps.stop() print("FPS", "{:.2f}".format(self.fps.fps())) # initialize the set of information we'll be displaying on # the frame info = [ ("Tracker", self.trackerType), ("Success", "Yes" if success else "No"), ("FPS", "{:.2f}".format(self.fps.fps())), ] # loop over the info tuples and draw them on our frame if debug: for (i, (k, v)) in enumerate(info): text = "{}: {}".format(k, v) cv2.putText(frame, text, (10, H - ((i * 20) + 20)), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2) # show the output frame if debug: cv2.imshow("Frame", frame) key = cv2.waitKey(1) & 0xFF # if the `q` key was pressed, break from the loop if key == ord("q"): break mv.shutDown() # if we are using a webcam, release the pointer if self.webCam: #self.vs.stop()
self.video_stream.stop()
conditional_block
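One behavioural note on the tracking loop in the rows above: abs(distance_to_middle < 20) takes the absolute value of a boolean, not of the distance, so the fast/slow speed choice effectively collapses to distance_to_middle < 20. Here is a hedged sketch of the speed selection with the parenthesis where the surrounding comments suggest it belongs; thresholds and magnitudes are copied from the original, and whether the larger magnitude should apply near or far from centre remains the author's call:

def pick_speed(distance_to_middle, zero_count):
    """Return (signed_speed, updated_zero_count) for the spine-centring move.

    distance_to_middle is +100 when the spine sits at the far left of the frame
    and -100 when it sits at the far right.
    """
    abs_speed = 0.005 if abs(distance_to_middle) < 20 else 0.001
    if abs(distance_to_middle) < 5:
        return 0.0, zero_count + 1   # close enough to centre: stop and count the still frame
    if distance_to_middle < 0:
        return abs_speed, 0          # spine right of centre
    return -abs_speed, 0             # spine left of centre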
label_tracking.py
""" Starts the thread to read frames from the video stream """ Thread(target=self.update, args=()).start() return self def update(self): """ Loops indefinitely and reads frames until the thread is stopped """ while True: if self.stopped: return else: self.grabbed, self.frame = self.stream.read() self.boundary_lines = findBookBoundaries(self.frame) def read(self): """ Returns the frame most recently read """ return self.frame, self.boundary_lines def stop(self): """ Stops the thread """ self.stopped = True class LabelTracker: def __init__(self, camera_index, trackerType, webCam, video=None): self.tracker = None # initialize the bounding box coordinates of the object we are going to track self.initBB = None self.vs = None # initialize the FPS throughput estimator self.fps = None self.trackerType = trackerType self.webCam = webCam self.video = video self.camera_index = camera_index self.video_stream = CameraVideoStream(camera_index) def trackLabel(self, label, debug=False): """ Sets up and tracks a given label """ self.setUp(debug) return self.track(label, debug) def setUp(self, debug=False):
"medianflow": cv2.TrackerMedianFlow_create, "mosse": cv2.TrackerMOSSE_create } # grab the appropriate object tracker using our dictionary of # OpenCV object tracker objects self.tracker = OPENCV_OBJECT_TRACKERS[self.trackerType]() # If using the webcam, grab the reference to the web cam if self.webCam: print("[INFO] starting video stream...") """ self.vs = VideoStream(src=self.camera_index).start() time.sleep(1.0) """ self.video_stream.start() # otherwise, grab a reference to the video file else: self.vs = cv2.VideoCapture(self.video) # Open the window if debug: cv2.namedWindow("Frame", cv2.WINDOW_AUTOSIZE) def track(self, label, debug=False): """ Takes a label and tracks it in a video or webcam stram. Displays the video with the tracked objects. Returns false if the label is lost """ prev_speed = 0 count_frames_speed_0 = 0 mv = MoveRobot() # loop over frames from the video stream while True: # grab the current frame, then handle if we are using a # VideoStream or VideoCapture object """ frame = self.vs.read() frame = frame[1] if not self.webCam else frame """ if count_frames_speed_0 >= 10: break frame, boundary_lines = self.video_stream.read() # check to see if we have reached the end of the stream if frame is None: break # resize the frame (so we can process it faster) and grab the # frame dimensions frame = imutils.resize(frame, width=500) if not self.webCam else frame (H, W) = frame.shape[:2] # Start tracking the given label if self.initBB is None and label is not None: self.initBB = label # start OpenCV object tracker using the supplied bounding box # coordinates, then start the FPS throughput estimator as well self.tracker.init(frame, self.initBB) self.fps = FPS().start() label = None # check to see if we are currently tracking an object if self.initBB is not None: # grab the new bounding box coordinates of the object (success, box) = self.tracker.update(frame) # check to see if the tracking was a success if success: (x, y, w, h) = [int(v) for v in box] # Get the spine boundary lines label_rectangle = Rectangle(x, y, w, h) left_spine_bound, right_spine_bound = findSpineBoundaries(label_rectangle, boundary_lines) # Plot the lines if debug and left_spine_bound: left_spine_bound.plotOnImage(frame, thickness=2) if debug and right_spine_bound: right_spine_bound.plotOnImage(frame, thickness=2) distance_to_middle = 0 # If both spine bounds are in frame and found if (right_spine_bound is not None) and (left_spine_bound is not None): # Adjust the position of the robot # Find a point on the spine boundaries which is in the middle of the frame height left_spine_coordinate = left_spine_bound.calculateXgivenY(H/2) right_spine_coordinate = right_spine_bound.calculateXgivenY(H/2) # Find a point on the middle of the spine spine_midpoint = left_spine_coordinate + (right_spine_coordinate - left_spine_coordinate) / 2 # Distance from the point on the middle of the spine to the middle of the frame # Range 100 if spine is on the very left of the frame to -100 on the right distance_to_middle = int(( (W/2 - spine_midpoint) * 100 ) / (W/2)) # If only one spine bound is found if (right_spine_bound is None) and (left_spine_bound is not None): # Distance from the point on the middle of the spine to the middle of the frame # Range 100 if spine is on the very left of the frame to -100 on the right left_spine_coordinate = left_spine_bound.calculateXgivenY(H/2) distance_to_middle = int(( (W/2 - left_spine_coordinate) * 100 ) / (W/2)) if (right_spine_bound is not None) and (left_spine_bound is None): # Distance from 
the point on the middle of the spine to the middle of the frame # Range 100 if spine is on the very left of the frame to -100 on the right right_spine_coordinate = right_spine_bound.calculateXgivenY(H/2) distance_to_middle = int(( (W/2 - right_spine_coordinate) * 100 ) / (W/2)) if (right_spine_bound is not None) or (left_spine_bound is not None): if abs(distance_to_middle < 20): abs_speed = 0.005 else: abs_speed = 0.001 if abs(distance_to_middle) < 5: speed = 0 count_frames_speed_0 += 1 elif distance_to_middle < 0: speed = abs_speed count_frames_speed_0 = 0 else: speed = -abs_speed count_frames_speed_0 = 0 if speed != prev_speed: print("Moving with speed " + str(speed) + " !") Thread(target=mv.setSpeed, args=(speed,)).start() prev_speed = speed # Draw the rectangle around the label if debug: cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2) #else: #return success # update the FPS counter self.fps.update() self.fps.stop() print("FPS", "{:.2f}".format(self.fps.fps())) # initialize the set of information we'll be displaying on # the frame info = [ ("Tracker", self.trackerType), ("Success", "Yes" if success else "No"), ("FPS", "{:.2f}".format(self.fps.fps())), ] # loop over the info tuples and draw them on our frame if debug: for (i, (k, v)) in enumerate(info): text = "{}: {}".format(k, v) cv2.putText(frame, text, (10, H - ((i *
""" Set up everything for the video stream and tracking """ # extract the OpenCV version info (major, minor) = cv2.__version__.split(".")[:2] # if we are using OpenCV 3.2 OR BEFORE, we can use a special factory # function to create our object tracker if int(major) == 3 and int(minor) < 3: self.tracker = cv2.Tracker_create(trackerType.upper()) # otherwise, for OpenCV 3.3 OR NEWER, we need to explicity call the # approrpiate object tracker constructor: else: # initialize a dictionary that maps strings to their corresponding # OpenCV object tracker implementations OPENCV_OBJECT_TRACKERS = { "csrt": cv2.TrackerCSRT_create, "kcf": cv2.TrackerKCF_create, "boosting": cv2.TrackerBoosting_create, "mil": cv2.TrackerMIL_create, "tld": cv2.TrackerTLD_create,
identifier_body
label_tracking.py
(self): """ Starts the thread to read frames from the video stream """ Thread(target=self.update, args=()).start() return self def update(self): """ Loops indefinitely and reads frames until the thread is stopped """ while True: if self.stopped: return else: self.grabbed, self.frame = self.stream.read() self.boundary_lines = findBookBoundaries(self.frame) def read(self): """ Returns the frame most recently read """ return self.frame, self.boundary_lines def stop(self): """ Stops the thread """ self.stopped = True class LabelTracker: def __init__(self, camera_index, trackerType, webCam, video=None): self.tracker = None # initialize the bounding box coordinates of the object we are going to track self.initBB = None self.vs = None # initialize the FPS throughput estimator self.fps = None self.trackerType = trackerType self.webCam = webCam self.video = video self.camera_index = camera_index self.video_stream = CameraVideoStream(camera_index) def trackLabel(self, label, debug=False): """ Sets up and tracks a given label """ self.setUp(debug) return self.track(label, debug) def setUp(self, debug=False): """ Set up everything for the video stream and tracking """ # extract the OpenCV version info (major, minor) = cv2.__version__.split(".")[:2] # if we are using OpenCV 3.2 OR BEFORE, we can use a special factory # function to create our object tracker if int(major) == 3 and int(minor) < 3: self.tracker = cv2.Tracker_create(trackerType.upper()) # otherwise, for OpenCV 3.3 OR NEWER, we need to explicity call the # approrpiate object tracker constructor: else: # initialize a dictionary that maps strings to their corresponding # OpenCV object tracker implementations OPENCV_OBJECT_TRACKERS = { "csrt": cv2.TrackerCSRT_create, "kcf": cv2.TrackerKCF_create, "boosting": cv2.TrackerBoosting_create, "mil": cv2.TrackerMIL_create, "tld": cv2.TrackerTLD_create, "medianflow": cv2.TrackerMedianFlow_create, "mosse": cv2.TrackerMOSSE_create } # grab the appropriate object tracker using our dictionary of # OpenCV object tracker objects self.tracker = OPENCV_OBJECT_TRACKERS[self.trackerType]() # If using the webcam, grab the reference to the web cam if self.webCam: print("[INFO] starting video stream...") """ self.vs = VideoStream(src=self.camera_index).start() time.sleep(1.0) """ self.video_stream.start() # otherwise, grab a reference to the video file else: self.vs = cv2.VideoCapture(self.video) # Open the window if debug: cv2.namedWindow("Frame", cv2.WINDOW_AUTOSIZE) def track(self, label, debug=False): """ Takes a label and tracks it in a video or webcam stram. Displays the video with the tracked objects. 
Returns false if the label is lost """ prev_speed = 0 count_frames_speed_0 = 0 mv = MoveRobot() # loop over frames from the video stream while True: # grab the current frame, then handle if we are using a # VideoStream or VideoCapture object """ frame = self.vs.read() frame = frame[1] if not self.webCam else frame """ if count_frames_speed_0 >= 10: break frame, boundary_lines = self.video_stream.read() # check to see if we have reached the end of the stream if frame is None: break # resize the frame (so we can process it faster) and grab the # frame dimensions frame = imutils.resize(frame, width=500) if not self.webCam else frame (H, W) = frame.shape[:2] # Start tracking the given label if self.initBB is None and label is not None: self.initBB = label # start OpenCV object tracker using the supplied bounding box # coordinates, then start the FPS throughput estimator as well self.tracker.init(frame, self.initBB) self.fps = FPS().start() label = None # check to see if we are currently tracking an object if self.initBB is not None: # grab the new bounding box coordinates of the object (success, box) = self.tracker.update(frame) # check to see if the tracking was a success if success: (x, y, w, h) = [int(v) for v in box] # Get the spine boundary lines label_rectangle = Rectangle(x, y, w, h) left_spine_bound, right_spine_bound = findSpineBoundaries(label_rectangle, boundary_lines) # Plot the lines if debug and left_spine_bound: left_spine_bound.plotOnImage(frame, thickness=2) if debug and right_spine_bound: right_spine_bound.plotOnImage(frame, thickness=2) distance_to_middle = 0 # If both spine bounds are in frame and found if (right_spine_bound is not None) and (left_spine_bound is not None): # Adjust the position of the robot # Find a point on the spine boundaries which is in the middle of the frame height left_spine_coordinate = left_spine_bound.calculateXgivenY(H/2) right_spine_coordinate = right_spine_bound.calculateXgivenY(H/2) # Find a point on the middle of the spine spine_midpoint = left_spine_coordinate + (right_spine_coordinate - left_spine_coordinate) / 2 # Distance from the point on the middle of the spine to the middle of the frame # Range 100 if spine is on the very left of the frame to -100 on the right distance_to_middle = int(( (W/2 - spine_midpoint) * 100 ) / (W/2)) # If only one spine bound is found if (right_spine_bound is None) and (left_spine_bound is not None): # Distance from the point on the middle of the spine to the middle of the frame # Range 100 if spine is on the very left of the frame to -100 on the right left_spine_coordinate = left_spine_bound.calculateXgivenY(H/2) distance_to_middle = int(( (W/2 - left_spine_coordinate) * 100 ) / (W/2)) if (right_spine_bound is not None) and (left_spine_bound is None): # Distance from the point on the middle of the spine to the middle of the frame # Range 100 if spine is on the very left of the frame to -100 on the right right_spine_coordinate = right_spine_bound.calculateXgivenY(H/2) distance_to_middle = int(( (W/2 - right_spine_coordinate) * 100 ) / (W/2)) if (right_spine_bound is not None) or (left_spine_bound is not None): if abs(distance_to_middle < 20): abs_speed = 0.005 else: abs_speed = 0.001 if abs(distance_to_middle) < 5: speed = 0 count_frames_speed_0 += 1 elif distance_to_middle < 0: speed = abs_speed count_frames_speed_0 = 0 else: speed = -abs_speed count_frames_speed_0 = 0 if speed != prev_speed: print("Moving with speed " + str(speed) + " !") Thread(target=mv.setSpeed, args=(speed,)).start() prev_speed = speed # 
Draw the rectangle around the label if debug: cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2) #else: #return success # update the FPS counter self.fps.update() self.fps.stop() print("FPS", "{:.2f}".format(self.fps.fps())) # initialize the set of information we'll be displaying on # the frame info = [ ("Tracker", self.trackerType), ("Success", "Yes" if success else "No"), ("FPS", "{:.2f}".format(self.fps.fps())), ] # loop over the info tuples and draw them on our frame if debug: for (i, (k, v)) in enumerate(info): text = "{}: {}".format(k, v) cv2.putText(frame, text, (10, H -
start
identifier_name
parser.py
4: 'High',
        5: 'Critical'
    }

    def get_scan_types(self):
        return ["Veracode Scan"]

    def get_label_for_scan_types(self, scan_type):
        return "Veracode Scan"

    def
(self, scan_type): return "Detailed XML Report" def get_findings(self, filename, test): root = ElementTree.parse(filename).getroot() app_id = root.attrib['app_id'] report_date = datetime.strptime(root.attrib['last_update_time'], '%Y-%m-%d %H:%M:%S %Z') dupes = dict() # Get SAST findings # This assumes `<category/>` only exists within the `<severity/>` nodes. for category_node in root.findall('x:severity/x:category', namespaces=XML_NAMESPACE): # Mitigation text. mitigation_text = '' mitigation_text += category_node.find('x:recommendations/x:para', namespaces=XML_NAMESPACE).get('text') + "\n\n" # Bullet list of recommendations: mitigation_text += ''.join(list(map( lambda x: ' * ' + x.get('text') + '\n', category_node.findall('x:recommendations/x:para/x:bulletitem', namespaces=XML_NAMESPACE)))) for flaw_node in category_node.findall('x:cwe/x:staticflaws/x:flaw', namespaces=XML_NAMESPACE): dupe_key = flaw_node.attrib['issueid'] # Only process if we didn't do that before. if dupe_key not in dupes: # Add to list. dupes[dupe_key] = self.__xml_static_flaw_to_finding(app_id, flaw_node, mitigation_text, test) for flaw_node in category_node.findall('x:cwe/x:dynamicflaws/x:flaw', namespaces=XML_NAMESPACE): dupe_key = flaw_node.attrib['issueid'] if dupe_key not in dupes: dupes[dupe_key] = self.__xml_dynamic_flaw_to_finding(app_id, flaw_node, mitigation_text, test) # Get SCA findings for component in root.findall('x:software_composition_analysis/x:vulnerable_components' '/x:component', namespaces=XML_NAMESPACE): _library = component.attrib['library'] if 'library_id' in component.attrib and component.attrib['library_id'].startswith("maven:"): # Set the library name from the maven component if it's available to align with CycloneDX + Veracode SCA split_library_id = component.attrib['library_id'].split(":") if len(split_library_id) > 2: _library = split_library_id[2] _vendor = component.attrib['vendor'] _version = component.attrib['version'] for vulnerability in component.findall('x:vulnerabilities/x:vulnerability', namespaces=XML_NAMESPACE): # We don't have a Id for SCA findings so just generate a random one dupes[str(uuid.uuid4())] = self.__xml_sca_flaw_to_finding(test, report_date, _vendor, _library, _version, vulnerability) return list(dupes.values()) @classmethod def __xml_flaw_to_unique_id(cls, app_id, xml_node): issue_id = xml_node.attrib['issueid'] return 'app-' + app_id + '_issue-' + issue_id @classmethod def __xml_flaw_to_severity(cls, xml_node): return cls.vc_severity_mapping.get(int(xml_node.attrib['severity']), 'Info') @classmethod def __xml_flaw_to_finding(cls, app_id, xml_node, mitigation_text, test): # Defaults finding = Finding() finding.test = test finding.mitigation = mitigation_text finding.static_finding = True finding.dynamic_finding = False finding.unique_id_from_tool = cls.__xml_flaw_to_unique_id(app_id, xml_node) # Report values finding.severity = cls.__xml_flaw_to_severity(xml_node) finding.cwe = int(xml_node.attrib['cweid']) finding.title = xml_node.attrib['categoryname'] finding.impact = 'CIA Impact: ' + xml_node.attrib['cia_impact'].upper() # Note that DD's legacy dedupe hashing uses the description field, # so for compatibility, description field should contain very static info. _description = xml_node.attrib['description'].replace('. 
', '.\n') finding.description = _description _references = 'None' if 'References:' in _description: _references = _description[_description.index( 'References:') + 13:].replace(') ', ')\n') finding.references = _references \ + "\n\nVulnerable Module: " + xml_node.attrib['module'] \ + "\nType: " + xml_node.attrib['type'] \ + "\nVeracode issue ID: " + xml_node.attrib['issueid'] _date_found = test.target_start if 'date_first_occurrence' in xml_node.attrib: _date_found = datetime.strptime( xml_node.attrib['date_first_occurrence'], '%Y-%m-%d %H:%M:%S %Z') finding.date = _date_found _is_mitigated = False _mitigated_date = None if ('mitigation_status' in xml_node.attrib and xml_node.attrib["mitigation_status"].lower() == "accepted"): # This happens if any mitigation (including 'Potential false positive') # was accepted in VC. for mitigation in xml_node.findall("x:mitigations/x:mitigation", namespaces=XML_NAMESPACE): _is_mitigated = True _mitigated_date = datetime.strptime(mitigation.attrib['date'], '%Y-%m-%d %H:%M:%S %Z') finding.is_mitigated = _is_mitigated finding.mitigated = _mitigated_date finding.active = not _is_mitigated # Check if it's a FP in veracode. # Only check in case finding was mitigated, since DD doesn't allow # both `verified` and `false_p` to be true, while `verified` is implied on the import # level, not on the finding-level. _false_positive = False if _is_mitigated: _remediation_status = xml_node.attrib['remediation_status'].lower() if "false positive" in _remediation_status or "falsepositive" in _remediation_status: _false_positive = True finding.false_p = _false_positive return finding @classmethod def __xml_static_flaw_to_finding(cls, app_id, xml_node, mitigation_text, test): finding = cls.__xml_flaw_to_finding(app_id, xml_node, mitigation_text, test) finding.static_finding = True finding.dynamic_finding = False _line_number = xml_node.attrib['line'] _functionrelativelocation = xml_node.attrib['functionrelativelocation'] if (_line_number is not None and _line_number.isdigit() and _functionrelativelocation is not None and _functionrelativelocation.isdigit()): finding.line = int(_line_number) + int(_functionrelativelocation) finding.sast_source_line = finding.line _source_file = xml_node.attrib.get('sourcefile') _sourcefilepath = xml_node.attrib.get('sourcefilepath') finding.file_path = _sourcefilepath + _source_file finding.sast_source_file_path = _sourcefilepath + _source_file _sast_source_obj = xml_node.attrib.get('functionprototype') finding.sast_source_object = _sast_source_obj if _sast_source_obj else None finding.unsaved_tags = ["sast"] return finding @classmethod def __xml_dynamic_flaw_to_finding(cls, app_id, xml_node, mitigation_text, test): finding = cls.__xml_flaw_to_finding(app_id, xml_node, mitigation_text, test) finding.static_finding = False finding.dynamic_finding = True url_host = xml_node.attrib.get('url') finding.unsaved_endpoints = [Endpoint.from_uri(url_host)] finding.unsaved_tags = ["dast"] return finding @staticmethod def _get_cwe(val): # Match only the first CWE! 
cweSearch = re.search("CWE-(\\d+)", val, re.IGNORECASE) if cweSearch: return int(cweSearch.group(1)) else: return None @classmethod def __xml_sca_flaw_to_finding(cls, test, report_date, vendor, library, version, xml_node): # Defaults finding = Finding() finding.test = test finding.static_finding = True finding.dynamic_finding = False # Report values cvss_score = float(xml_node.attrib['cvss_score']) finding.cvssv3_score = cvss_score finding.severity = cls.__xml_flaw_to_severity(xml_node) finding.unsaved_vulnerability_ids = [xml_node.attrib['cve_id']] finding.cwe = cls._get_cwe(xml_node.attrib['cwe_id']) finding.title = "Vulnerable component: {0}:{1}".format(library, version) finding.component_name = library
get_description_for_scan_types
identifier_name
parser.py
4: 'High', 5: 'Critical' } def get_scan_types(self): return ["Veracode Scan"] def get_label_for_scan_types(self, scan_type): return "Veracode Scan" def get_description_for_scan_types(self, scan_type): return "Detailed XML Report" def get_findings(self, filename, test): root = ElementTree.parse(filename).getroot() app_id = root.attrib['app_id'] report_date = datetime.strptime(root.attrib['last_update_time'], '%Y-%m-%d %H:%M:%S %Z') dupes = dict() # Get SAST findings # This assumes `<category/>` only exists within the `<severity/>` nodes. for category_node in root.findall('x:severity/x:category', namespaces=XML_NAMESPACE): # Mitigation text. mitigation_text = '' mitigation_text += category_node.find('x:recommendations/x:para', namespaces=XML_NAMESPACE).get('text') + "\n\n" # Bullet list of recommendations: mitigation_text += ''.join(list(map( lambda x: ' * ' + x.get('text') + '\n', category_node.findall('x:recommendations/x:para/x:bulletitem', namespaces=XML_NAMESPACE)))) for flaw_node in category_node.findall('x:cwe/x:staticflaws/x:flaw', namespaces=XML_NAMESPACE): dupe_key = flaw_node.attrib['issueid'] # Only process if we didn't do that before. if dupe_key not in dupes: # Add to list. dupes[dupe_key] = self.__xml_static_flaw_to_finding(app_id, flaw_node, mitigation_text, test) for flaw_node in category_node.findall('x:cwe/x:dynamicflaws/x:flaw', namespaces=XML_NAMESPACE): dupe_key = flaw_node.attrib['issueid'] if dupe_key not in dupes: dupes[dupe_key] = self.__xml_dynamic_flaw_to_finding(app_id, flaw_node, mitigation_text, test) # Get SCA findings for component in root.findall('x:software_composition_analysis/x:vulnerable_components' '/x:component', namespaces=XML_NAMESPACE): _library = component.attrib['library'] if 'library_id' in component.attrib and component.attrib['library_id'].startswith("maven:"): # Set the library name from the maven component if it's available to align with CycloneDX + Veracode SCA split_library_id = component.attrib['library_id'].split(":") if len(split_library_id) > 2: _library = split_library_id[2] _vendor = component.attrib['vendor'] _version = component.attrib['version'] for vulnerability in component.findall('x:vulnerabilities/x:vulnerability', namespaces=XML_NAMESPACE): # We don't have a Id for SCA findings so just generate a random one dupes[str(uuid.uuid4())] = self.__xml_sca_flaw_to_finding(test, report_date, _vendor, _library, _version, vulnerability) return list(dupes.values()) @classmethod def __xml_flaw_to_unique_id(cls, app_id, xml_node): issue_id = xml_node.attrib['issueid'] return 'app-' + app_id + '_issue-' + issue_id @classmethod def __xml_flaw_to_severity(cls, xml_node): return cls.vc_severity_mapping.get(int(xml_node.attrib['severity']), 'Info') @classmethod def __xml_flaw_to_finding(cls, app_id, xml_node, mitigation_text, test): # Defaults finding = Finding() finding.test = test finding.mitigation = mitigation_text finding.static_finding = True finding.dynamic_finding = False finding.unique_id_from_tool = cls.__xml_flaw_to_unique_id(app_id, xml_node) # Report values finding.severity = cls.__xml_flaw_to_severity(xml_node) finding.cwe = int(xml_node.attrib['cweid']) finding.title = xml_node.attrib['categoryname'] finding.impact = 'CIA Impact: ' + xml_node.attrib['cia_impact'].upper() # Note that DD's legacy dedupe hashing uses the description field, # so for compatibility, description field should contain very static info. _description = xml_node.attrib['description'].replace('. 
', '.\n') finding.description = _description _references = 'None' if 'References:' in _description: _references = _description[_description.index( 'References:') + 13:].replace(') ', ')\n') finding.references = _references \ + "\n\nVulnerable Module: " + xml_node.attrib['module'] \ + "\nType: " + xml_node.attrib['type'] \ + "\nVeracode issue ID: " + xml_node.attrib['issueid'] _date_found = test.target_start if 'date_first_occurrence' in xml_node.attrib: _date_found = datetime.strptime( xml_node.attrib['date_first_occurrence'], '%Y-%m-%d %H:%M:%S %Z') finding.date = _date_found _is_mitigated = False _mitigated_date = None if ('mitigation_status' in xml_node.attrib and xml_node.attrib["mitigation_status"].lower() == "accepted"): # This happens if any mitigation (including 'Potential false positive') # was accepted in VC. for mitigation in xml_node.findall("x:mitigations/x:mitigation", namespaces=XML_NAMESPACE): _is_mitigated = True _mitigated_date = datetime.strptime(mitigation.attrib['date'], '%Y-%m-%d %H:%M:%S %Z') finding.is_mitigated = _is_mitigated finding.mitigated = _mitigated_date finding.active = not _is_mitigated # Check if it's a FP in veracode. # Only check in case finding was mitigated, since DD doesn't allow # both `verified` and `false_p` to be true, while `verified` is implied on the import # level, not on the finding-level. _false_positive = False if _is_mitigated: _remediation_status = xml_node.attrib['remediation_status'].lower() if "false positive" in _remediation_status or "falsepositive" in _remediation_status:
finding.false_p = _false_positive return finding @classmethod def __xml_static_flaw_to_finding(cls, app_id, xml_node, mitigation_text, test): finding = cls.__xml_flaw_to_finding(app_id, xml_node, mitigation_text, test) finding.static_finding = True finding.dynamic_finding = False _line_number = xml_node.attrib['line'] _functionrelativelocation = xml_node.attrib['functionrelativelocation'] if (_line_number is not None and _line_number.isdigit() and _functionrelativelocation is not None and _functionrelativelocation.isdigit()): finding.line = int(_line_number) + int(_functionrelativelocation) finding.sast_source_line = finding.line _source_file = xml_node.attrib.get('sourcefile') _sourcefilepath = xml_node.attrib.get('sourcefilepath') finding.file_path = _sourcefilepath + _source_file finding.sast_source_file_path = _sourcefilepath + _source_file _sast_source_obj = xml_node.attrib.get('functionprototype') finding.sast_source_object = _sast_source_obj if _sast_source_obj else None finding.unsaved_tags = ["sast"] return finding @classmethod def __xml_dynamic_flaw_to_finding(cls, app_id, xml_node, mitigation_text, test): finding = cls.__xml_flaw_to_finding(app_id, xml_node, mitigation_text, test) finding.static_finding = False finding.dynamic_finding = True url_host = xml_node.attrib.get('url') finding.unsaved_endpoints = [Endpoint.from_uri(url_host)] finding.unsaved_tags = ["dast"] return finding @staticmethod def _get_cwe(val): # Match only the first CWE! cweSearch = re.search("CWE-(\\d+)", val, re.IGNORECASE) if cweSearch: return int(cweSearch.group(1)) else: return None @classmethod def __xml_sca_flaw_to_finding(cls, test, report_date, vendor, library, version, xml_node): # Defaults finding = Finding() finding.test = test finding.static_finding = True finding.dynamic_finding = False # Report values cvss_score = float(xml_node.attrib['cvss_score']) finding.cvssv3_score = cvss_score finding.severity = cls.__xml_flaw_to_severity(xml_node) finding.unsaved_vulnerability_ids = [xml_node.attrib['cve_id']] finding.cwe = cls._get_cwe(xml_node.attrib['cwe_id']) finding.title = "Vulnerable component: {0}:{1}".format(library, version) finding.component_name = library
_false_positive = True
conditional_block
parser.py
app_id = root.attrib['app_id'] report_date = datetime.strptime(root.attrib['last_update_time'], '%Y-%m-%d %H:%M:%S %Z') dupes = dict() # Get SAST findings # This assumes `<category/>` only exists within the `<severity/>` nodes. for category_node in root.findall('x:severity/x:category', namespaces=XML_NAMESPACE): # Mitigation text. mitigation_text = '' mitigation_text += category_node.find('x:recommendations/x:para', namespaces=XML_NAMESPACE).get('text') + "\n\n" # Bullet list of recommendations: mitigation_text += ''.join(list(map( lambda x: ' * ' + x.get('text') + '\n', category_node.findall('x:recommendations/x:para/x:bulletitem', namespaces=XML_NAMESPACE)))) for flaw_node in category_node.findall('x:cwe/x:staticflaws/x:flaw', namespaces=XML_NAMESPACE): dupe_key = flaw_node.attrib['issueid'] # Only process if we didn't do that before. if dupe_key not in dupes: # Add to list. dupes[dupe_key] = self.__xml_static_flaw_to_finding(app_id, flaw_node, mitigation_text, test) for flaw_node in category_node.findall('x:cwe/x:dynamicflaws/x:flaw', namespaces=XML_NAMESPACE): dupe_key = flaw_node.attrib['issueid'] if dupe_key not in dupes: dupes[dupe_key] = self.__xml_dynamic_flaw_to_finding(app_id, flaw_node, mitigation_text, test) # Get SCA findings for component in root.findall('x:software_composition_analysis/x:vulnerable_components' '/x:component', namespaces=XML_NAMESPACE): _library = component.attrib['library'] if 'library_id' in component.attrib and component.attrib['library_id'].startswith("maven:"): # Set the library name from the maven component if it's available to align with CycloneDX + Veracode SCA split_library_id = component.attrib['library_id'].split(":") if len(split_library_id) > 2: _library = split_library_id[2] _vendor = component.attrib['vendor'] _version = component.attrib['version'] for vulnerability in component.findall('x:vulnerabilities/x:vulnerability', namespaces=XML_NAMESPACE): # We don't have a Id for SCA findings so just generate a random one dupes[str(uuid.uuid4())] = self.__xml_sca_flaw_to_finding(test, report_date, _vendor, _library, _version, vulnerability) return list(dupes.values()) @classmethod def __xml_flaw_to_unique_id(cls, app_id, xml_node): issue_id = xml_node.attrib['issueid'] return 'app-' + app_id + '_issue-' + issue_id @classmethod def __xml_flaw_to_severity(cls, xml_node): return cls.vc_severity_mapping.get(int(xml_node.attrib['severity']), 'Info') @classmethod def __xml_flaw_to_finding(cls, app_id, xml_node, mitigation_text, test): # Defaults finding = Finding() finding.test = test finding.mitigation = mitigation_text finding.static_finding = True finding.dynamic_finding = False finding.unique_id_from_tool = cls.__xml_flaw_to_unique_id(app_id, xml_node) # Report values finding.severity = cls.__xml_flaw_to_severity(xml_node) finding.cwe = int(xml_node.attrib['cweid']) finding.title = xml_node.attrib['categoryname'] finding.impact = 'CIA Impact: ' + xml_node.attrib['cia_impact'].upper() # Note that DD's legacy dedupe hashing uses the description field, # so for compatibility, description field should contain very static info. _description = xml_node.attrib['description'].replace('. 
', '.\n') finding.description = _description _references = 'None' if 'References:' in _description: _references = _description[_description.index( 'References:') + 13:].replace(') ', ')\n') finding.references = _references \ + "\n\nVulnerable Module: " + xml_node.attrib['module'] \ + "\nType: " + xml_node.attrib['type'] \ + "\nVeracode issue ID: " + xml_node.attrib['issueid'] _date_found = test.target_start if 'date_first_occurrence' in xml_node.attrib: _date_found = datetime.strptime( xml_node.attrib['date_first_occurrence'], '%Y-%m-%d %H:%M:%S %Z') finding.date = _date_found _is_mitigated = False _mitigated_date = None if ('mitigation_status' in xml_node.attrib and xml_node.attrib["mitigation_status"].lower() == "accepted"): # This happens if any mitigation (including 'Potential false positive') # was accepted in VC. for mitigation in xml_node.findall("x:mitigations/x:mitigation", namespaces=XML_NAMESPACE): _is_mitigated = True _mitigated_date = datetime.strptime(mitigation.attrib['date'], '%Y-%m-%d %H:%M:%S %Z') finding.is_mitigated = _is_mitigated finding.mitigated = _mitigated_date finding.active = not _is_mitigated # Check if it's a FP in veracode. # Only check in case finding was mitigated, since DD doesn't allow # both `verified` and `false_p` to be true, while `verified` is implied on the import # level, not on the finding-level. _false_positive = False if _is_mitigated: _remediation_status = xml_node.attrib['remediation_status'].lower() if "false positive" in _remediation_status or "falsepositive" in _remediation_status: _false_positive = True finding.false_p = _false_positive return finding @classmethod def __xml_static_flaw_to_finding(cls, app_id, xml_node, mitigation_text, test): finding = cls.__xml_flaw_to_finding(app_id, xml_node, mitigation_text, test) finding.static_finding = True finding.dynamic_finding = False _line_number = xml_node.attrib['line'] _functionrelativelocation = xml_node.attrib['functionrelativelocation'] if (_line_number is not None and _line_number.isdigit() and _functionrelativelocation is not None and _functionrelativelocation.isdigit()): finding.line = int(_line_number) + int(_functionrelativelocation) finding.sast_source_line = finding.line _source_file = xml_node.attrib.get('sourcefile') _sourcefilepath = xml_node.attrib.get('sourcefilepath') finding.file_path = _sourcefilepath + _source_file finding.sast_source_file_path = _sourcefilepath + _source_file _sast_source_obj = xml_node.attrib.get('functionprototype') finding.sast_source_object = _sast_source_obj if _sast_source_obj else None finding.unsaved_tags = ["sast"] return finding @classmethod def __xml_dynamic_flaw_to_finding(cls, app_id, xml_node, mitigation_text, test): finding = cls.__xml_flaw_to_finding(app_id, xml_node, mitigation_text, test) finding.static_finding = False finding.dynamic_finding = True url_host = xml_node.attrib.get('url') finding.unsaved_endpoints = [Endpoint.from_uri(url_host)] finding.unsaved_tags = ["dast"] return finding @staticmethod def _get_cwe(val): # Match only the first CWE! 
cweSearch = re.search("CWE-(\\d+)", val, re.IGNORECASE) if cweSearch: return int(cweSearch.group(1)) else: return None @classmethod def __xml_sca_flaw_to_finding(cls, test, report_date, vendor, library, version, xml_node): # Defaults finding = Finding() finding.test = test finding.static_finding = True finding.dynamic_finding = False # Report values cvss_score = float(xml_node.attrib['cvss_score']) finding.cvssv3_score = cvss_score finding.severity = cls.__xml_flaw_to_severity(xml_node) finding.unsaved_vulnerability_ids = [xml_node.attrib['cve_id']] finding.cwe = cls._get_cwe(xml_node.attrib['cwe_id']) finding.title = "Vulnerable component: {0}:{1}".format(library, version) finding.component_name = library finding.component_version = version # Use report-date, otherwise DD doesn't # overwrite old matching SCA findings. finding.date = report_date _description = 'This library has known vulnerabilities.\n' _description += \ "**CVE:** {0} ({1})\n" \ "CVS Score: {2} ({3})\n" \ "Summary: \n>{4}" \
random_line_split
parser.py
4: 'High', 5: 'Critical' } def get_scan_types(self): return ["Veracode Scan"] def get_label_for_scan_types(self, scan_type): return "Veracode Scan" def get_description_for_scan_types(self, scan_type): return "Detailed XML Report" def get_findings(self, filename, test): root = ElementTree.parse(filename).getroot() app_id = root.attrib['app_id'] report_date = datetime.strptime(root.attrib['last_update_time'], '%Y-%m-%d %H:%M:%S %Z') dupes = dict() # Get SAST findings # This assumes `<category/>` only exists within the `<severity/>` nodes. for category_node in root.findall('x:severity/x:category', namespaces=XML_NAMESPACE): # Mitigation text. mitigation_text = '' mitigation_text += category_node.find('x:recommendations/x:para', namespaces=XML_NAMESPACE).get('text') + "\n\n" # Bullet list of recommendations: mitigation_text += ''.join(list(map( lambda x: ' * ' + x.get('text') + '\n', category_node.findall('x:recommendations/x:para/x:bulletitem', namespaces=XML_NAMESPACE)))) for flaw_node in category_node.findall('x:cwe/x:staticflaws/x:flaw', namespaces=XML_NAMESPACE): dupe_key = flaw_node.attrib['issueid'] # Only process if we didn't do that before. if dupe_key not in dupes: # Add to list. dupes[dupe_key] = self.__xml_static_flaw_to_finding(app_id, flaw_node, mitigation_text, test) for flaw_node in category_node.findall('x:cwe/x:dynamicflaws/x:flaw', namespaces=XML_NAMESPACE): dupe_key = flaw_node.attrib['issueid'] if dupe_key not in dupes: dupes[dupe_key] = self.__xml_dynamic_flaw_to_finding(app_id, flaw_node, mitigation_text, test) # Get SCA findings for component in root.findall('x:software_composition_analysis/x:vulnerable_components' '/x:component', namespaces=XML_NAMESPACE): _library = component.attrib['library'] if 'library_id' in component.attrib and component.attrib['library_id'].startswith("maven:"): # Set the library name from the maven component if it's available to align with CycloneDX + Veracode SCA split_library_id = component.attrib['library_id'].split(":") if len(split_library_id) > 2: _library = split_library_id[2] _vendor = component.attrib['vendor'] _version = component.attrib['version'] for vulnerability in component.findall('x:vulnerabilities/x:vulnerability', namespaces=XML_NAMESPACE): # We don't have a Id for SCA findings so just generate a random one dupes[str(uuid.uuid4())] = self.__xml_sca_flaw_to_finding(test, report_date, _vendor, _library, _version, vulnerability) return list(dupes.values()) @classmethod def __xml_flaw_to_unique_id(cls, app_id, xml_node): issue_id = xml_node.attrib['issueid'] return 'app-' + app_id + '_issue-' + issue_id @classmethod def __xml_flaw_to_severity(cls, xml_node): return cls.vc_severity_mapping.get(int(xml_node.attrib['severity']), 'Info') @classmethod def __xml_flaw_to_finding(cls, app_id, xml_node, mitigation_text, test): # Defaults finding = Finding() finding.test = test finding.mitigation = mitigation_text finding.static_finding = True finding.dynamic_finding = False finding.unique_id_from_tool = cls.__xml_flaw_to_unique_id(app_id, xml_node) # Report values finding.severity = cls.__xml_flaw_to_severity(xml_node) finding.cwe = int(xml_node.attrib['cweid']) finding.title = xml_node.attrib['categoryname'] finding.impact = 'CIA Impact: ' + xml_node.attrib['cia_impact'].upper() # Note that DD's legacy dedupe hashing uses the description field, # so for compatibility, description field should contain very static info. _description = xml_node.attrib['description'].replace('. 
', '.\n') finding.description = _description _references = 'None' if 'References:' in _description: _references = _description[_description.index( 'References:') + 13:].replace(') ', ')\n') finding.references = _references \ + "\n\nVulnerable Module: " + xml_node.attrib['module'] \ + "\nType: " + xml_node.attrib['type'] \ + "\nVeracode issue ID: " + xml_node.attrib['issueid'] _date_found = test.target_start if 'date_first_occurrence' in xml_node.attrib: _date_found = datetime.strptime( xml_node.attrib['date_first_occurrence'], '%Y-%m-%d %H:%M:%S %Z') finding.date = _date_found _is_mitigated = False _mitigated_date = None if ('mitigation_status' in xml_node.attrib and xml_node.attrib["mitigation_status"].lower() == "accepted"): # This happens if any mitigation (including 'Potential false positive') # was accepted in VC. for mitigation in xml_node.findall("x:mitigations/x:mitigation", namespaces=XML_NAMESPACE): _is_mitigated = True _mitigated_date = datetime.strptime(mitigation.attrib['date'], '%Y-%m-%d %H:%M:%S %Z') finding.is_mitigated = _is_mitigated finding.mitigated = _mitigated_date finding.active = not _is_mitigated # Check if it's a FP in veracode. # Only check in case finding was mitigated, since DD doesn't allow # both `verified` and `false_p` to be true, while `verified` is implied on the import # level, not on the finding-level. _false_positive = False if _is_mitigated: _remediation_status = xml_node.attrib['remediation_status'].lower() if "false positive" in _remediation_status or "falsepositive" in _remediation_status: _false_positive = True finding.false_p = _false_positive return finding @classmethod def __xml_static_flaw_to_finding(cls, app_id, xml_node, mitigation_text, test): finding = cls.__xml_flaw_to_finding(app_id, xml_node, mitigation_text, test) finding.static_finding = True finding.dynamic_finding = False _line_number = xml_node.attrib['line'] _functionrelativelocation = xml_node.attrib['functionrelativelocation'] if (_line_number is not None and _line_number.isdigit() and _functionrelativelocation is not None and _functionrelativelocation.isdigit()): finding.line = int(_line_number) + int(_functionrelativelocation) finding.sast_source_line = finding.line _source_file = xml_node.attrib.get('sourcefile') _sourcefilepath = xml_node.attrib.get('sourcefilepath') finding.file_path = _sourcefilepath + _source_file finding.sast_source_file_path = _sourcefilepath + _source_file _sast_source_obj = xml_node.attrib.get('functionprototype') finding.sast_source_object = _sast_source_obj if _sast_source_obj else None finding.unsaved_tags = ["sast"] return finding @classmethod def __xml_dynamic_flaw_to_finding(cls, app_id, xml_node, mitigation_text, test): finding = cls.__xml_flaw_to_finding(app_id, xml_node, mitigation_text, test) finding.static_finding = False finding.dynamic_finding = True url_host = xml_node.attrib.get('url') finding.unsaved_endpoints = [Endpoint.from_uri(url_host)] finding.unsaved_tags = ["dast"] return finding @staticmethod def _get_cwe(val): # Match only the first CWE!
@classmethod
    def __xml_sca_flaw_to_finding(cls, test, report_date, vendor, library, version, xml_node):
        # Defaults
        finding = Finding()
        finding.test = test
        finding.static_finding = True
        finding.dynamic_finding = False

        # Report values
        cvss_score = float(xml_node.attrib['cvss_score'])
        finding.cvssv3_score = cvss_score
        finding.severity = cls.__xml_flaw_to_severity(xml_node)
        finding.unsaved_vulnerability_ids = [xml_node.attrib['cve_id']]
        finding.cwe = cls._get_cwe(xml_node.attrib['cwe_id'])
        finding.title = "Vulnerable component: {0}:{1}".format(library, version)
        finding.component_name = library
cweSearch = re.search("CWE-(\\d+)", val, re.IGNORECASE)
        if cweSearch:
            return int(cweSearch.group(1))
        else:
            return None
identifier_body
multicluster.go
, kubeRegistry, &options, configCluster } // initializeCluster initializes the cluster by setting various handlers. func (m *Multicluster) initializeCluster(cluster *multicluster.Cluster, kubeController *kubeController, kubeRegistry *Controller, options Options, configCluster bool, clusterStopCh <-chan struct{}, ) { client := cluster.Client if m.serviceEntryController != nil && features.EnableServiceEntrySelectPods { // Add an instance handler in the kubernetes registry to notify service entry store about pod events kubeRegistry.AppendWorkloadHandler(m.serviceEntryController.WorkloadInstanceHandler) } if m.configController != nil && features.EnableAmbientControllers { m.configController.RegisterEventHandler(gvk.AuthorizationPolicy, kubeRegistry.AuthorizationPolicyHandler) m.configController.RegisterEventHandler(gvk.PeerAuthentication, kubeRegistry.PeerAuthenticationHandler) } if configCluster && m.serviceEntryController != nil && features.EnableEnhancedResourceScoping { kubeRegistry.AppendNamespaceDiscoveryHandlers(m.serviceEntryController.NamespaceDiscoveryHandler) } // TODO implement deduping in aggregate registry to allow multiple k8s registries to handle WorkloadEntry if features.EnableK8SServiceSelectWorkloadEntries { if m.serviceEntryController != nil && configCluster { // Add an instance handler in the service entry store to notify kubernetes about workload entry events m.serviceEntryController.AppendWorkloadHandler(kubeRegistry.WorkloadInstanceHandler) } else if features.WorkloadEntryCrossCluster { // TODO only do this for non-remotes, can't guarantee CRDs in remotes (depends on https://github.com/istio/istio/pull/29824) configStore := createWleConfigStore(client, m.revision, options) kubeController.workloadEntryController = serviceentry.NewWorkloadEntryController( configStore, options.XDSUpdater, serviceentry.WithClusterID(cluster.ID), serviceentry.WithNetworkIDCb(kubeRegistry.Network)) // Services can select WorkloadEntry from the same cluster. We only duplicate the Service to configure kube-dns. kubeController.workloadEntryController.AppendWorkloadHandler(kubeRegistry.WorkloadInstanceHandler) // ServiceEntry selects WorkloadEntry from remote cluster kubeController.workloadEntryController.AppendWorkloadHandler(m.serviceEntryController.WorkloadInstanceHandler) if features.EnableEnhancedResourceScoping { kubeRegistry.AppendNamespaceDiscoveryHandlers(kubeController.workloadEntryController.NamespaceDiscoveryHandler) } m.opts.MeshServiceController.AddRegistryAndRun(kubeController.workloadEntryController, clusterStopCh) go configStore.Run(clusterStopCh) } } // namespacecontroller requires discoverySelectors only if EnableEnhancedResourceScoping feature flag is set. var discoveryNamespacesFilter namespace.DiscoveryNamespacesFilter if features.EnableEnhancedResourceScoping { discoveryNamespacesFilter = kubeRegistry.opts.DiscoveryNamespacesFilter } // run after WorkloadHandler is added m.opts.MeshServiceController.AddRegistryAndRun(kubeRegistry, clusterStopCh) go func() { var shouldLead bool if !configCluster { shouldLead = m.checkShouldLead(client, options.SystemNamespace, clusterStopCh) log.Infof("should join leader-election for cluster %s: %t", cluster.ID, shouldLead) } if m.startNsController && (shouldLead || configCluster) { // Block server exit on graceful termination of the leader controller. 
m.s.RunComponentAsyncAndWait("namespace controller", func(_ <-chan struct{}) error { log.Infof("joining leader-election for %s in %s on cluster %s", leaderelection.NamespaceController, options.SystemNamespace, options.ClusterID) election := leaderelection. NewLeaderElectionMulticluster(options.SystemNamespace, m.serverID, leaderelection.NamespaceController, m.revision, !configCluster, client). AddRunFunction(func(leaderStop <-chan struct{}) { log.Infof("starting namespace controller for cluster %s", cluster.ID) nc := NewNamespaceController(client, m.caBundleWatcher, discoveryNamespacesFilter) // Start informers again. This fixes the case where informers for namespace do not start, // as we create them only after acquiring the leader lock // Note: stop here should be the overall pilot stop, NOT the leader election stop. We are // basically lazy loading the informer, if we stop it when we lose the lock we will never // recreate it again. client.RunAndWait(clusterStopCh) nc.Run(leaderStop) }) election.Run(clusterStopCh) return nil }) } // Set up injection webhook patching for remote clusters we are controlling. // The config cluster has this patching set up elsewhere. We may eventually want to move it here. // We can not use leader election for webhook patching because each revision needs to patch its own // webhook. if shouldLead && !configCluster && m.caBundleWatcher != nil { // Patch injection webhook cert // This requires RBAC permissions - a low-priv Istiod should not attempt to patch but rely on // operator or CI/CD if features.InjectionWebhookConfigName != "" { log.Infof("initializing injection webhook cert patcher for cluster %s", cluster.ID) patcher, err := webhooks.NewWebhookCertPatcher(client, m.revision, webhookName, m.caBundleWatcher) if err != nil { log.Errorf("could not initialize webhook cert patcher: %v", err) } else { go patcher.Run(clusterStopCh) } } } }() // setting up the serviceexport controller if and only if it is turned on in the meshconfig. if features.EnableMCSAutoExport { log.Infof("joining leader-election for %s in %s on cluster %s", leaderelection.ServiceExportController, options.SystemNamespace, options.ClusterID) // Block server exit on graceful termination of the leader controller. m.s.RunComponentAsyncAndWait("auto serviceexport controller", func(_ <-chan struct{}) error { leaderelection. NewLeaderElectionMulticluster(options.SystemNamespace, m.serverID, leaderelection.ServiceExportController, m.revision, !configCluster, client). AddRunFunction(func(leaderStop <-chan struct{}) { serviceExportController := newAutoServiceExportController(autoServiceExportOptions{ Client: client, ClusterID: options.ClusterID, DomainSuffix: options.DomainSuffix, ClusterLocal: m.clusterLocal, }) // Start informers again. This fixes the case where informers do not start, // as we create them only after acquiring the leader lock // Note: stop here should be the overall pilot stop, NOT the leader election stop. We are // basically lazy loading the informer, if we stop it when we lose the lock we will never // recreate it again. client.RunAndWait(clusterStopCh) serviceExportController.Run(leaderStop) }).Run(clusterStopCh) return nil }) } } // checkShouldLead returns true if the caller should attempt leader election for a remote cluster. 
func (m *Multicluster) checkShouldLead(client kubelib.Client, systemNamespace string, stop <-chan struct{}) bool { var res bool if features.ExternalIstiod { b := backoff.NewExponentialBackOff(backoff.DefaultOption()) ctx, cancel := context.WithCancel(context.Background()) go func() { select { case <-stop: cancel() case <-ctx.Done(): } }() defer cancel() _ = b.RetryWithContext(ctx, func() error { namespace, err := client.Kube().CoreV1().Namespaces().Get(context.TODO(), systemNamespace, metav1.GetOptions{}) if err != nil { if errors.IsNotFound(err) { return nil } return err } // found same system namespace on the remote cluster so check if we are a selected istiod to lead istiodCluster, found := namespace.Annotations[annotation.TopologyControlPlaneClusters.Name] if found { localCluster := string(m.opts.ClusterID) for _, cluster := range strings.Split(istiodCluster, ",") { if cluster == "*" || cluster == localCluster { res = true return nil } } } return nil }) } return res } // deleteCluster deletes cluster resources and does not trigger push. // This call is not thread safe. func (m *Multicluster) deleteCluster(clusterID cluster.ID) { m.opts.MeshServiceController.UnRegisterHandlersForCluster(clusterID) m.opts.MeshServiceController.DeleteRegistry(clusterID, provider.Kubernetes) kc, ok := m.remoteKubeControllers[clusterID] if !ok { log.Infof("cluster %s does not exist, maybe caused by invalid kubeconfig", clusterID) return } if kc.workloadEntryController != nil { m.opts.MeshServiceController.DeleteRegistry(clusterID, provider.External) } if err := kc.Cleanup(); err != nil { log.Warnf("failed cleaning up services in %s: %v", clusterID, err) } delete(m.remoteKubeControllers, clusterID) } func
createWleConfigStore
identifier_name
multicluster.go
Options // client for reading remote-secrets to initialize multicluster registries client kubernetes.Interface s server.Instance closing bool serviceEntryController *serviceentry.Controller configController model.ConfigStoreController XDSUpdater model.XDSUpdater m sync.Mutex // protects remoteKubeControllers remoteKubeControllers map[cluster.ID]*kubeController clusterLocal model.ClusterLocalProvider startNsController bool caBundleWatcher *keycertbundle.Watcher revision string // secretNamespace where we get cluster-access secrets secretNamespace string } // NewMulticluster initializes data structure to store multicluster information func NewMulticluster( serverID string, kc kubernetes.Interface, secretNamespace string, opts Options, serviceEntryController *serviceentry.Controller, configController model.ConfigStoreController, caBundleWatcher *keycertbundle.Watcher, revision string, startNsController bool, clusterLocal model.ClusterLocalProvider, s server.Instance, ) *Multicluster { remoteKubeController := make(map[cluster.ID]*kubeController) mc := &Multicluster{ serverID: serverID, opts: opts, serviceEntryController: serviceEntryController, configController: configController, startNsController: startNsController, caBundleWatcher: caBundleWatcher, revision: revision, XDSUpdater: opts.XDSUpdater, remoteKubeControllers: remoteKubeController, clusterLocal: clusterLocal, secretNamespace: secretNamespace, client: kc, s: s, } return mc } func (m *Multicluster) Run(stopCh <-chan struct{}) error { // Wait for server shutdown. <-stopCh return m.close() } func (m *Multicluster) close() error { m.m.Lock() m.closing = true // Gather all the member clusters. var clusterIDs []cluster.ID for clusterID := range m.remoteKubeControllers { clusterIDs = append(clusterIDs, clusterID) } m.m.Unlock() // Remove all the clusters. g, _ := errgroup.WithContext(context.Background()) for _, clusterID := range clusterIDs { clusterID := clusterID g.Go(func() error { m.ClusterDeleted(clusterID) return nil }) } return g.Wait() } // ClusterAdded is passed to the secret controller as a callback to be called // when a remote cluster is added. This function needs to set up all the handlers // to watch for resources being added, deleted or changed on remote clusters. func (m *Multicluster) ClusterAdded(cluster *multicluster.Cluster, clusterStopCh <-chan struct{}) { m.m.Lock() kubeController, kubeRegistry, options, configCluster := m.addCluster(cluster) if kubeController == nil { // m.closing was true, nothing to do. m.m.Unlock() return } m.m.Unlock() // clusterStopCh is a channel that will be closed when this cluster removed. m.initializeCluster(cluster, kubeController, kubeRegistry, *options, configCluster, clusterStopCh) } // ClusterUpdated is passed to the secret controller as a callback to be called // when a remote cluster is updated. func (m *Multicluster) ClusterUpdated(cluster *multicluster.Cluster, stop <-chan struct{})
// ClusterDeleted is passed to the secret controller as a callback to be called // when a remote cluster is deleted. Also must clear the cache so remote resources // are removed. func (m *Multicluster) ClusterDeleted(clusterID cluster.ID) { m.m.Lock() m.deleteCluster(clusterID) m.m.Unlock() if m.XDSUpdater != nil { m.XDSUpdater.ConfigUpdate(&model.PushRequest{Full: true, Reason: model.NewReasonStats(model.ClusterUpdate)}) } } // addCluster adds cluster related resources and updates internal structures. // This is not thread safe. func (m *Multicluster) addCluster(cluster *multicluster.Cluster) (*kubeController, *Controller, *Options, bool) { if m.closing { return nil, nil, nil, false } client := cluster.Client configCluster := m.opts.ClusterID == cluster.ID options := m.opts options.ClusterID = cluster.ID if !configCluster { options.SyncTimeout = features.RemoteClusterTimeout } // config cluster's DiscoveryNamespacesFilter is shared by both configController and serviceController // it is initiated in bootstrap initMulticluster function, pass to service controller to update it. // For other clusters, it should filter by its own cluster's namespace. if !configCluster { options.DiscoveryNamespacesFilter = nil } options.ConfigController = m.configController log.Infof("Initializing Kubernetes service registry %q", options.ClusterID) options.ConfigCluster = configCluster kubeRegistry := NewController(client, options) kubeController := &kubeController{ Controller: kubeRegistry, } m.remoteKubeControllers[cluster.ID] = kubeController return kubeController, kubeRegistry, &options, configCluster } // initializeCluster initializes the cluster by setting various handlers. func (m *Multicluster) initializeCluster(cluster *multicluster.Cluster, kubeController *kubeController, kubeRegistry *Controller, options Options, configCluster bool, clusterStopCh <-chan struct{}, ) { client := cluster.Client if m.serviceEntryController != nil && features.EnableServiceEntrySelectPods { // Add an instance handler in the kubernetes registry to notify service entry store about pod events kubeRegistry.AppendWorkloadHandler(m.serviceEntryController.WorkloadInstanceHandler) } if m.configController != nil && features.EnableAmbientControllers { m.configController.RegisterEventHandler(gvk.AuthorizationPolicy, kubeRegistry.AuthorizationPolicyHandler) m.configController.RegisterEventHandler(gvk.PeerAuthentication, kubeRegistry.PeerAuthenticationHandler) } if configCluster && m.serviceEntryController != nil && features.EnableEnhancedResourceScoping { kubeRegistry.AppendNamespaceDiscoveryHandlers(m.serviceEntryController.NamespaceDiscoveryHandler) } // TODO implement deduping in aggregate registry to allow multiple k8s registries to handle WorkloadEntry if features.EnableK8SServiceSelectWorkloadEntries { if m.serviceEntryController != nil && configCluster { // Add an instance handler in the service entry store to notify kubernetes about workload entry events m.serviceEntryController.AppendWorkloadHandler(kubeRegistry.WorkloadInstanceHandler) } else if features.WorkloadEntryCrossCluster { // TODO only do this for non-remotes, can't guarantee CRDs in remotes (depends on https://github.com/istio/istio/pull/29824) configStore := createWleConfigStore(client, m.revision, options) kubeController.workloadEntryController = serviceentry.NewWorkloadEntryController( configStore, options.XDSUpdater, serviceentry.WithClusterID(cluster.ID), serviceentry.WithNetworkIDCb(kubeRegistry.Network)) // Services can select WorkloadEntry from the same 
cluster. We only duplicate the Service to configure kube-dns. kubeController.workloadEntryController.AppendWorkloadHandler(kubeRegistry.WorkloadInstanceHandler) // ServiceEntry selects WorkloadEntry from remote cluster kubeController.workloadEntryController.AppendWorkloadHandler(m.serviceEntryController.WorkloadInstanceHandler) if features.EnableEnhancedResourceScoping { kubeRegistry.AppendNamespaceDiscoveryHandlers(kubeController.workloadEntryController.NamespaceDiscoveryHandler) } m.opts.MeshServiceController.AddRegistryAndRun(kubeController.workloadEntryController, clusterStopCh) go configStore.Run(clusterStopCh) } } // namespacecontroller requires discoverySelectors only if EnableEnhancedResourceScoping feature flag is set. var discoveryNamespacesFilter namespace.DiscoveryNamespacesFilter if features.EnableEnhancedResourceScoping { discoveryNamespacesFilter = kubeRegistry.opts.DiscoveryNamespacesFilter } // run after WorkloadHandler is added m.opts.MeshServiceController.AddRegistryAndRun(kubeRegistry, clusterStopCh) go func() { var shouldLead bool if !configCluster { shouldLead = m.checkShouldLead(client, options.SystemNamespace, clusterStopCh) log.Infof("should join leader-election for cluster %s: %t", cluster.ID, shouldLead) } if m.startNsController && (shouldLead || configCluster) { // Block server exit on graceful termination of the leader controller. m.s.RunComponentAsyncAndWait("namespace controller", func(_ <-chan struct{}) error { log.Infof("joining leader-election for %s in %s on cluster %s", leaderelection.NamespaceController, options.SystemNamespace, options.ClusterID) election := leaderelection. NewLeaderElectionMulticluster(options.SystemNamespace, m.serverID, leaderelection.NamespaceController, m.revision, !configCluster, client). AddRunFunction(func(leaderStop <-chan struct
{
	m.m.Lock()
	m.deleteCluster(cluster.ID)
	kubeController, kubeRegistry, options, configCluster := m.addCluster(cluster)
	if kubeController == nil {
		// m.closing was true, nothing to do.
		m.m.Unlock()
		return
	}
	m.m.Unlock()
	// clusterStopCh is a channel that will be closed when this cluster removed.
	m.initializeCluster(cluster, kubeController, kubeRegistry, *options, configCluster, stop)
}
identifier_body
multicluster.go
opts Options // client for reading remote-secrets to initialize multicluster registries client kubernetes.Interface s server.Instance closing bool serviceEntryController *serviceentry.Controller configController model.ConfigStoreController XDSUpdater model.XDSUpdater m sync.Mutex // protects remoteKubeControllers remoteKubeControllers map[cluster.ID]*kubeController clusterLocal model.ClusterLocalProvider startNsController bool caBundleWatcher *keycertbundle.Watcher revision string // secretNamespace where we get cluster-access secrets secretNamespace string } // NewMulticluster initializes data structure to store multicluster information func NewMulticluster( serverID string, kc kubernetes.Interface, secretNamespace string, opts Options, serviceEntryController *serviceentry.Controller, configController model.ConfigStoreController, caBundleWatcher *keycertbundle.Watcher, revision string, startNsController bool, clusterLocal model.ClusterLocalProvider, s server.Instance, ) *Multicluster { remoteKubeController := make(map[cluster.ID]*kubeController) mc := &Multicluster{ serverID: serverID, opts: opts, serviceEntryController: serviceEntryController, configController: configController, startNsController: startNsController, caBundleWatcher: caBundleWatcher, revision: revision, XDSUpdater: opts.XDSUpdater, remoteKubeControllers: remoteKubeController, clusterLocal: clusterLocal, secretNamespace: secretNamespace, client: kc, s: s, } return mc } func (m *Multicluster) Run(stopCh <-chan struct{}) error { // Wait for server shutdown. <-stopCh return m.close() } func (m *Multicluster) close() error { m.m.Lock() m.closing = true // Gather all the member clusters. var clusterIDs []cluster.ID for clusterID := range m.remoteKubeControllers { clusterIDs = append(clusterIDs, clusterID) } m.m.Unlock() // Remove all the clusters. g, _ := errgroup.WithContext(context.Background()) for _, clusterID := range clusterIDs { clusterID := clusterID g.Go(func() error { m.ClusterDeleted(clusterID) return nil }) } return g.Wait() } // ClusterAdded is passed to the secret controller as a callback to be called // when a remote cluster is added. This function needs to set up all the handlers // to watch for resources being added, deleted or changed on remote clusters. func (m *Multicluster) ClusterAdded(cluster *multicluster.Cluster, clusterStopCh <-chan struct{}) { m.m.Lock() kubeController, kubeRegistry, options, configCluster := m.addCluster(cluster) if kubeController == nil { // m.closing was true, nothing to do. m.m.Unlock() return } m.m.Unlock() // clusterStopCh is a channel that will be closed when this cluster removed. m.initializeCluster(cluster, kubeController, kubeRegistry, *options, configCluster, clusterStopCh) } // ClusterUpdated is passed to the secret controller as a callback to be called // when a remote cluster is updated. func (m *Multicluster) ClusterUpdated(cluster *multicluster.Cluster, stop <-chan struct{}) { m.m.Lock() m.deleteCluster(cluster.ID) kubeController, kubeRegistry, options, configCluster := m.addCluster(cluster) if kubeController == nil { // m.closing was true, nothing to do. m.m.Unlock() return } m.m.Unlock() // clusterStopCh is a channel that will be closed when this cluster removed. m.initializeCluster(cluster, kubeController, kubeRegistry, *options, configCluster, stop)
} // ClusterDeleted is passed to the secret controller as a callback to be called // when a remote cluster is deleted. Also must clear the cache so remote resources // are removed. func (m *Multicluster) ClusterDeleted(clusterID cluster.ID) { m.m.Lock() m.deleteCluster(clusterID) m.m.Unlock() if m.XDSUpdater != nil { m.XDSUpdater.ConfigUpdate(&model.PushRequest{Full: true, Reason: model.NewReasonStats(model.ClusterUpdate)}) } } // addCluster adds cluster related resources and updates internal structures. // This is not thread safe. func (m *Multicluster) addCluster(cluster *multicluster.Cluster) (*kubeController, *Controller, *Options, bool) { if m.closing { return nil, nil, nil, false } client := cluster.Client configCluster := m.opts.ClusterID == cluster.ID options := m.opts options.ClusterID = cluster.ID if !configCluster { options.SyncTimeout = features.RemoteClusterTimeout } // config cluster's DiscoveryNamespacesFilter is shared by both configController and serviceController // it is initiated in bootstrap initMulticluster function, pass to service controller to update it. // For other clusters, it should filter by its own cluster's namespace. if !configCluster { options.DiscoveryNamespacesFilter = nil } options.ConfigController = m.configController log.Infof("Initializing Kubernetes service registry %q", options.ClusterID) options.ConfigCluster = configCluster kubeRegistry := NewController(client, options) kubeController := &kubeController{ Controller: kubeRegistry, } m.remoteKubeControllers[cluster.ID] = kubeController return kubeController, kubeRegistry, &options, configCluster } // initializeCluster initializes the cluster by setting various handlers. func (m *Multicluster) initializeCluster(cluster *multicluster.Cluster, kubeController *kubeController, kubeRegistry *Controller, options Options, configCluster bool, clusterStopCh <-chan struct{}, ) { client := cluster.Client if m.serviceEntryController != nil && features.EnableServiceEntrySelectPods { // Add an instance handler in the kubernetes registry to notify service entry store about pod events kubeRegistry.AppendWorkloadHandler(m.serviceEntryController.WorkloadInstanceHandler) } if m.configController != nil && features.EnableAmbientControllers { m.configController.RegisterEventHandler(gvk.AuthorizationPolicy, kubeRegistry.AuthorizationPolicyHandler) m.configController.RegisterEventHandler(gvk.PeerAuthentication, kubeRegistry.PeerAuthenticationHandler) } if configCluster && m.serviceEntryController != nil && features.EnableEnhancedResourceScoping { kubeRegistry.AppendNamespaceDiscoveryHandlers(m.serviceEntryController.NamespaceDiscoveryHandler) } // TODO implement deduping in aggregate registry to allow multiple k8s registries to handle WorkloadEntry if features.EnableK8SServiceSelectWorkloadEntries { if m.serviceEntryController != nil && configCluster { // Add an instance handler in the service entry store to notify kubernetes about workload entry events m.serviceEntryController.AppendWorkloadHandler(kubeRegistry.WorkloadInstanceHandler) } else if features.WorkloadEntryCrossCluster { // TODO only do this for non-remotes, can't guarantee CRDs in remotes (depends on https://github.com/istio/istio/pull/29824) configStore := createWleConfigStore(client, m.revision, options) kubeController.workloadEntryController = serviceentry.NewWorkloadEntryController( configStore, options.XDSUpdater, serviceentry.WithClusterID(cluster.ID), serviceentry.WithNetworkIDCb(kubeRegistry.Network)) // Services can select WorkloadEntry from the same 
cluster. We only duplicate the Service to configure kube-dns. kubeController.workloadEntryController.AppendWorkloadHandler(kubeRegistry.WorkloadInstanceHandler) // ServiceEntry selects WorkloadEntry from remote cluster kubeController.workloadEntryController.AppendWorkloadHandler(m.serviceEntryController.WorkloadInstanceHandler) if features.EnableEnhancedResourceScoping { kubeRegistry.AppendNamespaceDiscoveryHandlers(kubeController.workloadEntryController.NamespaceDiscoveryHandler) } m.opts.MeshServiceController.AddRegistryAndRun(kubeController.workloadEntryController, clusterStopCh) go configStore.Run(clusterStopCh) } } // namespacecontroller requires discoverySelectors only if EnableEnhancedResourceScoping feature flag is set. var discoveryNamespacesFilter namespace.DiscoveryNamespacesFilter if features.EnableEnhancedResourceScoping { discoveryNamespacesFilter = kubeRegistry.opts.DiscoveryNamespacesFilter } // run after WorkloadHandler is added m.opts.MeshServiceController.AddRegistryAndRun(kubeRegistry, clusterStopCh) go func() { var shouldLead bool if !configCluster { shouldLead = m.checkShouldLead(client, options.SystemNamespace, clusterStopCh) log.Infof("should join leader-election for cluster %s: %t", cluster.ID, shouldLead) } if m.startNsController && (shouldLead || configCluster) { // Block server exit on graceful termination of the leader controller. m.s.RunComponentAsyncAndWait("namespace controller", func(_ <-chan struct{}) error { log.Infof("joining leader-election for %s in %s on cluster %s", leaderelection.NamespaceController, options.SystemNamespace, options.ClusterID) election := leaderelection. NewLeaderElectionMulticluster(options.SystemNamespace, m.serverID, leaderelection.NamespaceController, m.revision, !configCluster, client). AddRunFunction(func(leaderStop <-chan struct{})
random_line_split
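The multicluster.go fragments above revolve around three lifecycle callbacks (ClusterAdded, ClusterUpdated, ClusterDeleted) that maintain a mutex-protected map of per-cluster controllers and trigger a full XDS push when a cluster goes away. The following is a hypothetical, self-contained Go sketch of that callback pattern; the type and function names are illustrative stand-ins, not Istio's actual API.

package main

import (
	"fmt"
	"sync"
)

type ClusterID string

type clusterController struct {
	stop chan struct{}
}

type Registry struct {
	mu          sync.Mutex
	controllers map[ClusterID]*clusterController
	// pushFullXDS stands in for the XDSUpdater.ConfigUpdate(full push) seen above.
	pushFullXDS func(reason string)
}

func NewRegistry(push func(string)) *Registry {
	return &Registry{controllers: map[ClusterID]*clusterController{}, pushFullXDS: push}
}

func (r *Registry) ClusterAdded(id ClusterID) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.controllers[id] = &clusterController{stop: make(chan struct{})}
}

func (r *Registry) ClusterUpdated(id ClusterID) {
	r.mu.Lock()
	r.deleteLocked(id) // tear down the old controller before re-adding
	r.controllers[id] = &clusterController{stop: make(chan struct{})}
	r.mu.Unlock()
}

func (r *Registry) ClusterDeleted(id ClusterID) {
	r.mu.Lock()
	r.deleteLocked(id)
	r.mu.Unlock()
	if r.pushFullXDS != nil {
		r.pushFullXDS("cluster deleted")
	}
}

func (r *Registry) deleteLocked(id ClusterID) {
	if c, ok := r.controllers[id]; ok {
		close(c.stop) // signal the per-cluster controller to shut down
		delete(r.controllers, id)
	}
}

func main() {
	r := NewRegistry(func(reason string) { fmt.Println("full push:", reason) })
	r.ClusterAdded("remote-1")
	r.ClusterUpdated("remote-1")
	r.ClusterDeleted("remote-1")
}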
multicluster.go
Options // client for reading remote-secrets to initialize multicluster registries client kubernetes.Interface s server.Instance closing bool serviceEntryController *serviceentry.Controller configController model.ConfigStoreController XDSUpdater model.XDSUpdater m sync.Mutex // protects remoteKubeControllers remoteKubeControllers map[cluster.ID]*kubeController clusterLocal model.ClusterLocalProvider startNsController bool caBundleWatcher *keycertbundle.Watcher revision string // secretNamespace where we get cluster-access secrets secretNamespace string } // NewMulticluster initializes data structure to store multicluster information func NewMulticluster( serverID string, kc kubernetes.Interface, secretNamespace string, opts Options, serviceEntryController *serviceentry.Controller, configController model.ConfigStoreController, caBundleWatcher *keycertbundle.Watcher, revision string, startNsController bool, clusterLocal model.ClusterLocalProvider, s server.Instance, ) *Multicluster { remoteKubeController := make(map[cluster.ID]*kubeController) mc := &Multicluster{ serverID: serverID, opts: opts, serviceEntryController: serviceEntryController, configController: configController, startNsController: startNsController, caBundleWatcher: caBundleWatcher, revision: revision, XDSUpdater: opts.XDSUpdater, remoteKubeControllers: remoteKubeController, clusterLocal: clusterLocal, secretNamespace: secretNamespace, client: kc, s: s, } return mc } func (m *Multicluster) Run(stopCh <-chan struct{}) error { // Wait for server shutdown. <-stopCh return m.close() } func (m *Multicluster) close() error { m.m.Lock() m.closing = true // Gather all the member clusters. var clusterIDs []cluster.ID for clusterID := range m.remoteKubeControllers { clusterIDs = append(clusterIDs, clusterID) } m.m.Unlock() // Remove all the clusters. g, _ := errgroup.WithContext(context.Background()) for _, clusterID := range clusterIDs { clusterID := clusterID g.Go(func() error { m.ClusterDeleted(clusterID) return nil }) } return g.Wait() } // ClusterAdded is passed to the secret controller as a callback to be called // when a remote cluster is added. This function needs to set up all the handlers // to watch for resources being added, deleted or changed on remote clusters. func (m *Multicluster) ClusterAdded(cluster *multicluster.Cluster, clusterStopCh <-chan struct{}) { m.m.Lock() kubeController, kubeRegistry, options, configCluster := m.addCluster(cluster) if kubeController == nil { // m.closing was true, nothing to do. m.m.Unlock() return } m.m.Unlock() // clusterStopCh is a channel that will be closed when this cluster removed. m.initializeCluster(cluster, kubeController, kubeRegistry, *options, configCluster, clusterStopCh) } // ClusterUpdated is passed to the secret controller as a callback to be called // when a remote cluster is updated. func (m *Multicluster) ClusterUpdated(cluster *multicluster.Cluster, stop <-chan struct{}) { m.m.Lock() m.deleteCluster(cluster.ID) kubeController, kubeRegistry, options, configCluster := m.addCluster(cluster) if kubeController == nil { // m.closing was true, nothing to do. m.m.Unlock() return } m.m.Unlock() // clusterStopCh is a channel that will be closed when this cluster removed. m.initializeCluster(cluster, kubeController, kubeRegistry, *options, configCluster, stop) } // ClusterDeleted is passed to the secret controller as a callback to be called // when a remote cluster is deleted. Also must clear the cache so remote resources // are removed. 
func (m *Multicluster) ClusterDeleted(clusterID cluster.ID) { m.m.Lock() m.deleteCluster(clusterID) m.m.Unlock() if m.XDSUpdater != nil { m.XDSUpdater.ConfigUpdate(&model.PushRequest{Full: true, Reason: model.NewReasonStats(model.ClusterUpdate)}) } } // addCluster adds cluster related resources and updates internal structures. // This is not thread safe. func (m *Multicluster) addCluster(cluster *multicluster.Cluster) (*kubeController, *Controller, *Options, bool) { if m.closing { return nil, nil, nil, false } client := cluster.Client configCluster := m.opts.ClusterID == cluster.ID options := m.opts options.ClusterID = cluster.ID if !configCluster { options.SyncTimeout = features.RemoteClusterTimeout } // config cluster's DiscoveryNamespacesFilter is shared by both configController and serviceController // it is initiated in bootstrap initMulticluster function, pass to service controller to update it. // For other clusters, it should filter by its own cluster's namespace. if !configCluster { options.DiscoveryNamespacesFilter = nil } options.ConfigController = m.configController log.Infof("Initializing Kubernetes service registry %q", options.ClusterID) options.ConfigCluster = configCluster kubeRegistry := NewController(client, options) kubeController := &kubeController{ Controller: kubeRegistry, } m.remoteKubeControllers[cluster.ID] = kubeController return kubeController, kubeRegistry, &options, configCluster } // initializeCluster initializes the cluster by setting various handlers. func (m *Multicluster) initializeCluster(cluster *multicluster.Cluster, kubeController *kubeController, kubeRegistry *Controller, options Options, configCluster bool, clusterStopCh <-chan struct{}, ) { client := cluster.Client if m.serviceEntryController != nil && features.EnableServiceEntrySelectPods
if m.configController != nil && features.EnableAmbientControllers { m.configController.RegisterEventHandler(gvk.AuthorizationPolicy, kubeRegistry.AuthorizationPolicyHandler) m.configController.RegisterEventHandler(gvk.PeerAuthentication, kubeRegistry.PeerAuthenticationHandler) } if configCluster && m.serviceEntryController != nil && features.EnableEnhancedResourceScoping { kubeRegistry.AppendNamespaceDiscoveryHandlers(m.serviceEntryController.NamespaceDiscoveryHandler) } // TODO implement deduping in aggregate registry to allow multiple k8s registries to handle WorkloadEntry if features.EnableK8SServiceSelectWorkloadEntries { if m.serviceEntryController != nil && configCluster { // Add an instance handler in the service entry store to notify kubernetes about workload entry events m.serviceEntryController.AppendWorkloadHandler(kubeRegistry.WorkloadInstanceHandler) } else if features.WorkloadEntryCrossCluster { // TODO only do this for non-remotes, can't guarantee CRDs in remotes (depends on https://github.com/istio/istio/pull/29824) configStore := createWleConfigStore(client, m.revision, options) kubeController.workloadEntryController = serviceentry.NewWorkloadEntryController( configStore, options.XDSUpdater, serviceentry.WithClusterID(cluster.ID), serviceentry.WithNetworkIDCb(kubeRegistry.Network)) // Services can select WorkloadEntry from the same cluster. We only duplicate the Service to configure kube-dns. kubeController.workloadEntryController.AppendWorkloadHandler(kubeRegistry.WorkloadInstanceHandler) // ServiceEntry selects WorkloadEntry from remote cluster kubeController.workloadEntryController.AppendWorkloadHandler(m.serviceEntryController.WorkloadInstanceHandler) if features.EnableEnhancedResourceScoping { kubeRegistry.AppendNamespaceDiscoveryHandlers(kubeController.workloadEntryController.NamespaceDiscoveryHandler) } m.opts.MeshServiceController.AddRegistryAndRun(kubeController.workloadEntryController, clusterStopCh) go configStore.Run(clusterStopCh) } } // namespacecontroller requires discoverySelectors only if EnableEnhancedResourceScoping feature flag is set. var discoveryNamespacesFilter namespace.DiscoveryNamespacesFilter if features.EnableEnhancedResourceScoping { discoveryNamespacesFilter = kubeRegistry.opts.DiscoveryNamespacesFilter } // run after WorkloadHandler is added m.opts.MeshServiceController.AddRegistryAndRun(kubeRegistry, clusterStopCh) go func() { var shouldLead bool if !configCluster { shouldLead = m.checkShouldLead(client, options.SystemNamespace, clusterStopCh) log.Infof("should join leader-election for cluster %s: %t", cluster.ID, shouldLead) } if m.startNsController && (shouldLead || configCluster) { // Block server exit on graceful termination of the leader controller. m.s.RunComponentAsyncAndWait("namespace controller", func(_ <-chan struct{}) error { log.Infof("joining leader-election for %s in %s on cluster %s", leaderelection.NamespaceController, options.SystemNamespace, options.ClusterID) election := leaderelection. NewLeaderElectionMulticluster(options.SystemNamespace, m.serverID, leaderelection.NamespaceController, m.revision, !configCluster, client). AddRunFunction(func(leaderStop <-chan struct
{
	// Add an instance handler in the kubernetes registry to notify service entry store about pod events
	kubeRegistry.AppendWorkloadHandler(m.serviceEntryController.WorkloadInstanceHandler)
}
conditional_block
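The conditional block completed above registers a workload-instance callback via AppendWorkloadHandler. Below is a minimal, stdlib-only Go sketch of that handler-append pattern, with illustrative types standing in for the real registry and workload types.

package main

import "fmt"

type WorkloadInstance struct {
	Name, Namespace string
}

type Event int

const (
	EventAdd Event = iota
	EventUpdate
	EventDelete
)

type Controller struct {
	workloadHandlers []func(*WorkloadInstance, Event)
}

// AppendWorkloadHandler mirrors the call made in the completed block above.
func (c *Controller) AppendWorkloadHandler(h func(*WorkloadInstance, Event)) {
	c.workloadHandlers = append(c.workloadHandlers, h)
}

// notify fans an event out to every registered handler.
func (c *Controller) notify(wi *WorkloadInstance, ev Event) {
	for _, h := range c.workloadHandlers {
		h(wi, ev)
	}
}

func main() {
	c := &Controller{}
	c.AppendWorkloadHandler(func(wi *WorkloadInstance, ev Event) {
		fmt.Printf("workload %s/%s event=%d\n", wi.Namespace, wi.Name, ev)
	})
	c.notify(&WorkloadInstance{Name: "pod-a", Namespace: "default"}, EventAdd)
}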
main.go
_service_port_internal", "8443") viper.SetDefault("env_injector_exec_dir", "/azure-keyvault/") viper.AutomaticEnv() } func init() { flag.StringVar(&params.version, "version", "", "Version of this component.") flag.StringVar(&params.versionEnvImage, "versionenvimage", "", "Version of the env image component.") flag.StringVar(&params.kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.") flag.StringVar(&params.masterURL, "master", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.") flag.StringVar(&params.cloudConfig, "cloudconfig", "/etc/kubernetes/azure.json", "Path to cloud config. Only required if this is not at default location /etc/kubernetes/azure.json") flag.StringVar(&params.logFormat, "logging-format", "text", "Log format - text or json.") } func main() { klog.InitFlags(nil) defer klog.Flush() flag.Parse() initConfig() if params.logFormat == "json" { klog.SetLogger(jsonlogs.JSONLogger) } akv2k8s.Version = params.version // logFormat := viper.GetString("log_format") // setLogFormat(logFormat) akv2k8s.LogVersion() config = azureKeyVaultConfig{ port: viper.GetString("port"), httpPort: viper.GetString("port_http"), authType: viper.GetString("auth_type"), serveMetrics: viper.GetBool("metrics_enabled"), tlsCertFile: fmt.Sprintf("%s/%s", viper.GetString("tls_cert_dir"), "tls.crt"), tlsKeyFile: fmt.Sprintf("%s/%s", viper.GetString("tls_cert_dir"), "tls.key"), useAuthService: viper.GetBool("use_auth_service"), authServiceName: viper.GetString("webhook_auth_service"), authServicePort: viper.GetString("webhook_auth_service_port"), authServicePortInternal: viper.GetString("webhook_auth_service_port_internal"), dockerImageInspectionTimeout: viper.GetInt("docker_image_inspection_timeout"), useAksCredentialsWithAcr: viper.GetBool("docker_image_inspection_use_acs_credentials"), injectorDir: viper.GetString("env_injector_exec_dir"), versionEnvImage: params.versionEnvImage, cloudConfig: params.cloudConfig, } activeSettings := []interface{}{ "webhookPort", config.port, "serveMetrics", config.serveMetrics, "authType", config.authType, "useAuthService", config.useAuthService, "useAksCredsWithAcr", config.useAksCredentialsWithAcr, "dockerInspectionTimeout", config.dockerImageInspectionTimeout, "cloudConfigPath", config.cloudConfig, } if config.useAuthService { activeSettings = append(activeSettings, "authServiceName", config.authServiceName, "authServicePort", config.authServicePort, "authServiceInternalPort", config.authServicePortInternal) } klog.InfoS("active settings", activeSettings...) 
mutator := mutating.MutatorFunc(vaultSecretsMutator) metricsRecorder := metrics.NewPrometheus(prometheus.DefaultRegisterer) logLevel := flag.Lookup("v").Value.String() klogLevel, err := strconv.Atoi(logLevel) if err != nil { klog.ErrorS(err, "failed to parse log level") klogLevel = 2 } internalLogger := &internalLog.Std{Debug: klogLevel >= 4} podHandler := handlerFor(mutating.WebhookConfig{Name: "azurekeyvault-secrets-pods", Obj: &corev1.Pod{}}, mutator, metricsRecorder, internalLogger) if config.useAuthService { caCertDir := viper.GetString("ca_cert_dir") if caCertDir == "" { klog.InfoS("missing env var - must exist to use auth service", "env", "CA_CERT_DIR") os.Exit(1) } caCertFile := filepath.Join(caCertDir, "tls.crt") caKeyFile := filepath.Join(caCertDir, "tls.key") config.caCert, err = ioutil.ReadFile(caCertFile) if err != nil { klog.ErrorS(err, "failed to read pem file for ca cert", "file", caCertFile) os.Exit(1) } config.caKey, err = ioutil.ReadFile(caKeyFile) if err != nil { klog.ErrorS(err, "failed to read pem file for ca key", "file", caKeyFile) os.Exit(1) } } if config.authType != "cloudConfig" { klog.V(4).InfoS("not using cloudConfig for auth - looking for azure key vault credentials in envrionment") cProvider, err := credentialprovider.NewFromEnvironment() if err != nil { klog.ErrorS(err, "failed to create credentials provider from environment for azure key vault") os.Exit(1) } config.credentials, err = cProvider.GetAzureKeyVaultCredentials() if err != nil { klog.ErrorS(err, "failed to get credentials for azure key vault") os.Exit(1) } } else { klog.V(4).InfoS("using cloudConfig for auth - reading credentials", "file", config.cloudConfig) f, err := os.Open(config.cloudConfig) if err != nil { klog.ErrorS(err, "failed to read azure config", "file", config.cloudConfig) os.Exit(1) } defer f.Close() cloudCnfProvider, err := credentialprovider.NewFromCloudConfig(f) if err != nil { klog.ErrorS(err, "failed to create cloud config provider for azure key vault", "file", config.cloudConfig) os.Exit(1) } config.credentials, err = cloudCnfProvider.GetAzureKeyVaultCredentials() if err != nil { klog.ErrorS(err, "failed to get azure key vault credentials", "file", config.cloudConfig) os.Exit(1) } } klog.V(4).InfoS("checking credentials by getting authorizer from credentials") _, err = config.credentials.Authorizer() if err != nil { klog.ErrorS(err, "failed to get authorizer from azure key vault credentials") os.Exit(1) } cfg, err := clientcmd.BuildConfigFromFlags(params.masterURL, params.kubeconfig) if err != nil { klog.ErrorS(err, "failed to build kube config", "master", params.masterURL, "kubeconfig", params.kubeconfig) os.Exit(1) } config.kubeClient, err = kubernetes.NewForConfig(cfg) if err != nil { klog.ErrorS(err, "failed to build kube clientset", "master", params.masterURL, "kubeconfig", params.kubeconfig) os.Exit(1) } wg := new(sync.WaitGroup) wg.Add(2) httpMux := http.NewServeMux() httpURL := fmt.Sprintf(":%s", config.httpPort) if config.serveMetrics { httpMux.Handle("/metrics", promhttp.Handler()) klog.InfoS("serving metrics endpoint", "path", fmt.Sprintf("%s/metrics", httpURL)) } httpMux.HandleFunc("/healthz", healthHandler) klog.InfoS("serving health endpoint", "path", fmt.Sprintf("%s/healthz", httpURL)) go func() { err := http.ListenAndServe(httpURL, httpMux) if err != nil { klog.ErrorS(err, "error serving metrics", "port", httpURL) os.Exit(1) } wg.Done() }() router := mux.NewRouter() tlsURL := fmt.Sprintf(":%s", config.port) router.Handle("/pods", podHandler) klog.InfoS("serving 
encrypted webhook endpoint", "path", fmt.Sprintf("%s/pods", tlsURL)) router.HandleFunc("/healthz", healthHandler) klog.InfoS("serving encrypted healthz endpoint", "path", fmt.Sprintf("%s/healthz", tlsURL)) if config.useAuthService { wg.Add(1) authURL := fmt.Sprintf(":%s", config.authServicePortInternal) authRouter := mux.NewRouter() authRouter.HandleFunc("/auth/{namespace}/{pod}", authHandler) authServer := createServerWithMTLS(config.caCert, authRouter, authURL) klog.InfoS("serving encrypted auth endpoint", "path", fmt.Sprintf("%s/auth", authURL)) go func() { err := authServer.ListenAndServeTLS(config.tlsCertFile, config.tlsKeyFile) if err != nil { klog.ErrorS(err, "error serving auth", "port", authURL) os.Exit(1) } wg.Done() }() } go func() { server := createServer(router, tlsURL, nil) err := server.ListenAndServeTLS(config.tlsCertFile, config.tlsKeyFile) if err != nil { klog.ErrorS(err, "error serving endpoint", "port", tlsURL) os.Exit(1) } wg.Done() }() wg.Wait()
}

func createServerWithMTLS(caCert []byte, router http.Handler, url string) *http.Server {
	clientCertPool := x509.NewCertPool()
random_line_split
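The completion above breaks off right after building the client certificate pool. A plausible continuation, sketched here with the standard library only, is to trust clients whose certificates chain to the given CA and require client certificates on the TLS config. This is an assumption about the helper's body, not the project's actual implementation.

package main

import (
	"crypto/tls"
	"crypto/x509"
	"net/http"
)

func createServerWithMTLS(caCert []byte, router http.Handler, url string) *http.Server {
	clientCertPool := x509.NewCertPool()
	// Trust only clients whose certificates chain to the provided CA PEM.
	clientCertPool.AppendCertsFromPEM(caCert)

	tlsConfig := &tls.Config{
		ClientCAs:  clientCertPool,
		ClientAuth: tls.RequireAndVerifyClientCert,
		MinVersion: tls.VersionTLS12,
	}

	return &http.Server{
		Addr:      url,
		Handler:   router,
		TLSConfig: tlsConfig,
	}
}

func main() {
	// Illustrative only: in real use caCert would be read from the CA's tls.crt on disk.
	srv := createServerWithMTLS([]byte("-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----"), http.NewServeMux(), ":8443")
	_ = srv // srv.ListenAndServeTLS(certFile, keyFile) would start serving
}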
main.go
lsKeyFile string caCert []byte caKey []byte authType string useAuthService bool dockerImageInspectionTimeout int useAksCredentialsWithAcr bool authServiceName string authServicePort string authServicePortInternal string kubeClient *kubernetes.Clientset credentials credentialprovider.AzureKeyVaultCredentials versionEnvImage string kubeconfig string masterURL string injectorDir string } type cmdParams struct { version string versionEnvImage string kubeconfig string masterURL string cloudConfig string logFormat string } var config azureKeyVaultConfig var params cmdParams var ( podsMutatedCounter = promauto.NewCounter(prometheus.CounterOpts{ Name: "akv2k8s_pod_mutations_total", Help: "The total number of pods mutated", }) podsInspectedCounter = promauto.NewCounter(prometheus.CounterOpts{ Name: "akv2k8s_pod_inspections_total", Help: "The total number of pods inspected, including mutated", }) podsMutatedFailedCounter = promauto.NewCounter(prometheus.CounterOpts{ Name: "akv2k8s_pod_mutations_failed_total", Help: "The total number of attempted pod mutations that failed", }) ) const envVarReplacementKey = "@azurekeyvault" // func setLogFormat(logFormat string) { // switch logFormat { // case "fmt": // log.SetFormatter(&log.TextFormatter{ // DisableColors: true, // FullTimestamp: true, // }) // case "json": // log.SetFormatter(&log.JSONFormatter{}) // default: // log.Warnf("Log format %s not supported - using default fmt", logFormat) // } // } func vaultSecretsMutator(ctx context.Context, obj metav1.Object) (bool, error) { req := whcontext.GetAdmissionRequest(ctx) var pod *corev1.Pod switch v := obj.(type) { case *corev1.Pod: klog.InfoS("found pod to mutate", "pod", klog.KRef(req.Namespace, req.Name)) pod = v default: return false, nil } podsInspectedCounter.Inc() err := mutatePodSpec(pod, req.Namespace, req.UID) if err != nil { klog.ErrorS(err, "failed to mutate", "pod", klog.KRef(req.Namespace, req.Name)) podsMutatedFailedCounter.Inc() } return false, err } func handlerFor(config mutating.WebhookConfig, mutator mutating.MutatorFunc, recorder metrics.Recorder, logger internalLog.Logger) http.Handler { webhook, err := mutating.NewWebhook(config, mutator, nil, nil, logger) if err != nil { klog.ErrorS(err, "error creating webhook") os.Exit(1) } handler, err := whhttp.HandlerFor(webhook) if err != nil { klog.ErrorS(err, "error creating webhook") os.Exit(1) } return handler } func authHandler(w http.ResponseWriter, r *http.Request) { if r.Method == "GET" { vars := mux.Vars(r) pod := podData{ name: vars["pod"], namespace: vars["namespace"], } if pod.name == "" || pod.namespace == "" { klog.InfoS("failed to parse url parameters", "pod", pod.name, "namespace", pod.namespace) http.Error(w, "", http.StatusBadRequest) return } err := authorize(config.kubeClient, pod) if err != nil { klog.ErrorS(err, "failed to authorize request", "pod", pod.name, "namespace", pod.namespace) http.Error(w, "", http.StatusForbidden) return } w.Header().Set("Content-Type", "application/json; charset=UTF-8") w.WriteHeader(http.StatusOK) if err := json.NewEncoder(w).Encode(config.credentials); err != nil { klog.ErrorS(err, "failed to json encode token", "pod", pod.name, "namespace", pod.namespace) http.Error(w, err.Error(), http.StatusInternalServerError) } else { klog.InfoS("served oauth token", "pod", pod.name, "namespace", pod.namespace) } } else { klog.InfoS("invalid request method") http.Error(w, "Invalid request method", http.StatusMethodNotAllowed) } } func healthHandler(w http.ResponseWriter, r *http.Request) {
func initConfig() { viper.SetDefault("azurekeyvault_env_image", "spvest/azure-keyvault-env:latest") viper.SetDefault("docker_image_inspection_timeout", 20) viper.SetDefault("docker_image_inspection_use_acs_credentials", true) viper.SetDefault("auth_type", "cloudConfig") viper.SetDefault("use_auth_service", true) viper.SetDefault("metrics_enabled", false) viper.SetDefault("port_http", "80") viper.SetDefault("port", "443") viper.SetDefault("webhook_auth_service_port", "8443") viper.SetDefault("webhook_auth_service_port_internal", "8443") viper.SetDefault("env_injector_exec_dir", "/azure-keyvault/") viper.AutomaticEnv() } func init() { flag.StringVar(&params.version, "version", "", "Version of this component.") flag.StringVar(&params.versionEnvImage, "versionenvimage", "", "Version of the env image component.") flag.StringVar(&params.kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.") flag.StringVar(&params.masterURL, "master", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.") flag.StringVar(&params.cloudConfig, "cloudconfig", "/etc/kubernetes/azure.json", "Path to cloud config. Only required if this is not at default location /etc/kubernetes/azure.json") flag.StringVar(&params.logFormat, "logging-format", "text", "Log format - text or json.") } func main() { klog.InitFlags(nil) defer klog.Flush() flag.Parse() initConfig() if params.logFormat == "json" { klog.SetLogger(jsonlogs.JSONLogger) } akv2k8s.Version = params.version // logFormat := viper.GetString("log_format") // setLogFormat(logFormat) akv2k8s.LogVersion() config = azureKeyVaultConfig{ port: viper.GetString("port"), httpPort: viper.GetString("port_http"), authType: viper.GetString("auth_type"), serveMetrics: viper.GetBool("metrics_enabled"), tlsCertFile: fmt.Sprintf("%s/%s", viper.GetString("tls_cert_dir"), "tls.crt"), tlsKeyFile: fmt.Sprintf("%s/%s", viper.GetString("tls_cert_dir"), "tls.key"), useAuthService: viper.GetBool("use_auth_service"), authServiceName: viper.GetString("webhook_auth_service"), authServicePort: viper.GetString("webhook_auth_service_port"), authServicePortInternal: viper.GetString("webhook_auth_service_port_internal"), dockerImageInspectionTimeout: viper.GetInt("docker_image_inspection_timeout"), useAksCredentialsWithAcr: viper.GetBool("docker_image_inspection_use_acs_credentials"), injectorDir: viper.GetString("env_injector_exec_dir"), versionEnvImage: params.versionEnvImage, cloudConfig: params.cloudConfig, } activeSettings := []interface{}{ "webhookPort", config.port, "serveMetrics", config.serveMetrics, "authType", config.authType, "useAuthService", config.useAuthService, "useAksCredsWithAcr", config.useAksCredentialsWithAcr, "dockerInspectionTimeout", config.dockerImageInspectionTimeout, "cloudConfigPath", config.cloudConfig, } if config.useAuthService { activeSettings = append(activeSettings, "authServiceName", config.authServiceName, "authServicePort", config.authServicePort, "authServiceInternalPort", config.authServicePortInternal) } klog.InfoS("active settings", activeSettings...) 
mutator := mutating.MutatorFunc(vaultSecretsMutator) metricsRecorder := metrics.NewPrometheus(prometheus.DefaultRegisterer) logLevel := flag.Lookup("v").Value.String() klogLevel, err := strconv.Atoi(logLevel) if err != nil { klog.ErrorS(err, "failed to parse log level") klogLevel = 2 } internalLogger := &internalLog.Std{Debug: klogLevel >= 4} podHandler := handlerFor(mutating.WebhookConfig{Name: "azurekeyvault-secrets-pods", Obj: &corev1.Pod{}}, mutator, metricsRecorder, internalLogger) if config.useAuthService { caCertDir := viper.GetString("ca_cert_dir") if caCertDir == "" { klog.InfoS("missing env var - must exist to use auth service
if r.Method == "GET" { w.WriteHeader(http.StatusOK) } else { http.Error(w, "Invalid request method", http.StatusMethodNotAllowed) } }
identifier_body
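The identifier_body completion above is the body of healthHandler: answer GET with 200 and reject everything else. For reference, here is a stand-alone Go sketch that mirrors that logic and wires it onto a plain HTTP mux, as the surrounding main.go does for its /healthz endpoint; the port and logging are illustrative.

package main

import (
	"log"
	"net/http"
)

// healthHandler answers GET with 200 OK and rejects other methods,
// mirroring the handler body shown in the row above.
func healthHandler(w http.ResponseWriter, r *http.Request) {
	if r.Method == http.MethodGet {
		w.WriteHeader(http.StatusOK)
		return
	}
	http.Error(w, "Invalid request method", http.StatusMethodNotAllowed)
}

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/healthz", healthHandler)
	log.Println("serving health endpoint on :8080/healthz")
	log.Fatal(http.ListenAndServe(":8080", mux))
}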
main.go
lsKeyFile string caCert []byte caKey []byte authType string useAuthService bool dockerImageInspectionTimeout int useAksCredentialsWithAcr bool authServiceName string authServicePort string authServicePortInternal string kubeClient *kubernetes.Clientset credentials credentialprovider.AzureKeyVaultCredentials versionEnvImage string kubeconfig string masterURL string injectorDir string } type cmdParams struct { version string versionEnvImage string kubeconfig string masterURL string cloudConfig string logFormat string } var config azureKeyVaultConfig var params cmdParams var ( podsMutatedCounter = promauto.NewCounter(prometheus.CounterOpts{ Name: "akv2k8s_pod_mutations_total", Help: "The total number of pods mutated", }) podsInspectedCounter = promauto.NewCounter(prometheus.CounterOpts{ Name: "akv2k8s_pod_inspections_total", Help: "The total number of pods inspected, including mutated", }) podsMutatedFailedCounter = promauto.NewCounter(prometheus.CounterOpts{ Name: "akv2k8s_pod_mutations_failed_total", Help: "The total number of attempted pod mutations that failed", }) ) const envVarReplacementKey = "@azurekeyvault" // func setLogFormat(logFormat string) { // switch logFormat { // case "fmt": // log.SetFormatter(&log.TextFormatter{ // DisableColors: true, // FullTimestamp: true, // }) // case "json": // log.SetFormatter(&log.JSONFormatter{}) // default: // log.Warnf("Log format %s not supported - using default fmt", logFormat) // } // } func v
ctx context.Context, obj metav1.Object) (bool, error) { req := whcontext.GetAdmissionRequest(ctx) var pod *corev1.Pod switch v := obj.(type) { case *corev1.Pod: klog.InfoS("found pod to mutate", "pod", klog.KRef(req.Namespace, req.Name)) pod = v default: return false, nil } podsInspectedCounter.Inc() err := mutatePodSpec(pod, req.Namespace, req.UID) if err != nil { klog.ErrorS(err, "failed to mutate", "pod", klog.KRef(req.Namespace, req.Name)) podsMutatedFailedCounter.Inc() } return false, err } func handlerFor(config mutating.WebhookConfig, mutator mutating.MutatorFunc, recorder metrics.Recorder, logger internalLog.Logger) http.Handler { webhook, err := mutating.NewWebhook(config, mutator, nil, nil, logger) if err != nil { klog.ErrorS(err, "error creating webhook") os.Exit(1) } handler, err := whhttp.HandlerFor(webhook) if err != nil { klog.ErrorS(err, "error creating webhook") os.Exit(1) } return handler } func authHandler(w http.ResponseWriter, r *http.Request) { if r.Method == "GET" { vars := mux.Vars(r) pod := podData{ name: vars["pod"], namespace: vars["namespace"], } if pod.name == "" || pod.namespace == "" { klog.InfoS("failed to parse url parameters", "pod", pod.name, "namespace", pod.namespace) http.Error(w, "", http.StatusBadRequest) return } err := authorize(config.kubeClient, pod) if err != nil { klog.ErrorS(err, "failed to authorize request", "pod", pod.name, "namespace", pod.namespace) http.Error(w, "", http.StatusForbidden) return } w.Header().Set("Content-Type", "application/json; charset=UTF-8") w.WriteHeader(http.StatusOK) if err := json.NewEncoder(w).Encode(config.credentials); err != nil { klog.ErrorS(err, "failed to json encode token", "pod", pod.name, "namespace", pod.namespace) http.Error(w, err.Error(), http.StatusInternalServerError) } else { klog.InfoS("served oauth token", "pod", pod.name, "namespace", pod.namespace) } } else { klog.InfoS("invalid request method") http.Error(w, "Invalid request method", http.StatusMethodNotAllowed) } } func healthHandler(w http.ResponseWriter, r *http.Request) { if r.Method == "GET" { w.WriteHeader(http.StatusOK) } else { http.Error(w, "Invalid request method", http.StatusMethodNotAllowed) } } func initConfig() { viper.SetDefault("azurekeyvault_env_image", "spvest/azure-keyvault-env:latest") viper.SetDefault("docker_image_inspection_timeout", 20) viper.SetDefault("docker_image_inspection_use_acs_credentials", true) viper.SetDefault("auth_type", "cloudConfig") viper.SetDefault("use_auth_service", true) viper.SetDefault("metrics_enabled", false) viper.SetDefault("port_http", "80") viper.SetDefault("port", "443") viper.SetDefault("webhook_auth_service_port", "8443") viper.SetDefault("webhook_auth_service_port_internal", "8443") viper.SetDefault("env_injector_exec_dir", "/azure-keyvault/") viper.AutomaticEnv() } func init() { flag.StringVar(&params.version, "version", "", "Version of this component.") flag.StringVar(&params.versionEnvImage, "versionenvimage", "", "Version of the env image component.") flag.StringVar(&params.kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.") flag.StringVar(&params.masterURL, "master", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.") flag.StringVar(&params.cloudConfig, "cloudconfig", "/etc/kubernetes/azure.json", "Path to cloud config. 
Only required if this is not at default location /etc/kubernetes/azure.json") flag.StringVar(&params.logFormat, "logging-format", "text", "Log format - text or json.") } func main() { klog.InitFlags(nil) defer klog.Flush() flag.Parse() initConfig() if params.logFormat == "json" { klog.SetLogger(jsonlogs.JSONLogger) } akv2k8s.Version = params.version // logFormat := viper.GetString("log_format") // setLogFormat(logFormat) akv2k8s.LogVersion() config = azureKeyVaultConfig{ port: viper.GetString("port"), httpPort: viper.GetString("port_http"), authType: viper.GetString("auth_type"), serveMetrics: viper.GetBool("metrics_enabled"), tlsCertFile: fmt.Sprintf("%s/%s", viper.GetString("tls_cert_dir"), "tls.crt"), tlsKeyFile: fmt.Sprintf("%s/%s", viper.GetString("tls_cert_dir"), "tls.key"), useAuthService: viper.GetBool("use_auth_service"), authServiceName: viper.GetString("webhook_auth_service"), authServicePort: viper.GetString("webhook_auth_service_port"), authServicePortInternal: viper.GetString("webhook_auth_service_port_internal"), dockerImageInspectionTimeout: viper.GetInt("docker_image_inspection_timeout"), useAksCredentialsWithAcr: viper.GetBool("docker_image_inspection_use_acs_credentials"), injectorDir: viper.GetString("env_injector_exec_dir"), versionEnvImage: params.versionEnvImage, cloudConfig: params.cloudConfig, } activeSettings := []interface{}{ "webhookPort", config.port, "serveMetrics", config.serveMetrics, "authType", config.authType, "useAuthService", config.useAuthService, "useAksCredsWithAcr", config.useAksCredentialsWithAcr, "dockerInspectionTimeout", config.dockerImageInspectionTimeout, "cloudConfigPath", config.cloudConfig, } if config.useAuthService { activeSettings = append(activeSettings, "authServiceName", config.authServiceName, "authServicePort", config.authServicePort, "authServiceInternalPort", config.authServicePortInternal) } klog.InfoS("active settings", activeSettings...) mutator := mutating.MutatorFunc(vaultSecretsMutator) metricsRecorder := metrics.NewPrometheus(prometheus.DefaultRegisterer) logLevel := flag.Lookup("v").Value.String() klogLevel, err := strconv.Atoi(logLevel) if err != nil { klog.ErrorS(err, "failed to parse log level") klogLevel = 2 } internalLogger := &internalLog.Std{Debug: klogLevel >= 4} podHandler := handlerFor(mutating.WebhookConfig{Name: "azurekeyvault-secrets-pods", Obj: &corev1.Pod{}}, mutator, metricsRecorder, internalLogger) if config.useAuthService { caCertDir := viper.GetString("ca_cert_dir") if caCertDir == "" { klog.InfoS("missing env var - must exist to use auth service
aultSecretsMutator(
identifier_name
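The identifier_name row above completes the function name vaultSecretsMutator. As a rough illustration of the shape of such a mutator, the sketch below type-switches on the admitted object, counts an inspection, attempts the pod mutation, and counts failures. The webhook framework, metrics counters, and Kubernetes types are replaced with hypothetical stand-ins; this is not the project's code.

package main

import (
	"errors"
	"fmt"
)

type Pod struct{ Name, Namespace string }

var podsInspected, podsMutationFailed int

// mutatePodSpec is a placeholder for the real injection logic.
func mutatePodSpec(pod *Pod) error {
	if pod.Name == "" {
		return errors.New("pod has no name")
	}
	// ... inject env-injector containers / env vars here ...
	return nil
}

func vaultSecretsMutator(obj interface{}) (stop bool, err error) {
	pod, ok := obj.(*Pod)
	if !ok {
		return false, nil // not a pod: nothing to do
	}
	podsInspected++
	if err := mutatePodSpec(pod); err != nil {
		podsMutationFailed++
		return false, err
	}
	return false, nil
}

func main() {
	_, err := vaultSecretsMutator(&Pod{Name: "demo", Namespace: "default"})
	fmt.Println("inspected:", podsInspected, "failed:", podsMutationFailed, "err:", err)
}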
main.go
lsKeyFile string caCert []byte caKey []byte authType string useAuthService bool dockerImageInspectionTimeout int useAksCredentialsWithAcr bool authServiceName string authServicePort string authServicePortInternal string kubeClient *kubernetes.Clientset credentials credentialprovider.AzureKeyVaultCredentials versionEnvImage string kubeconfig string masterURL string injectorDir string } type cmdParams struct { version string versionEnvImage string kubeconfig string masterURL string cloudConfig string logFormat string } var config azureKeyVaultConfig var params cmdParams var ( podsMutatedCounter = promauto.NewCounter(prometheus.CounterOpts{ Name: "akv2k8s_pod_mutations_total", Help: "The total number of pods mutated", }) podsInspectedCounter = promauto.NewCounter(prometheus.CounterOpts{ Name: "akv2k8s_pod_inspections_total", Help: "The total number of pods inspected, including mutated", }) podsMutatedFailedCounter = promauto.NewCounter(prometheus.CounterOpts{ Name: "akv2k8s_pod_mutations_failed_total", Help: "The total number of attempted pod mutations that failed", }) ) const envVarReplacementKey = "@azurekeyvault" // func setLogFormat(logFormat string) { // switch logFormat { // case "fmt": // log.SetFormatter(&log.TextFormatter{ // DisableColors: true, // FullTimestamp: true, // }) // case "json": // log.SetFormatter(&log.JSONFormatter{}) // default: // log.Warnf("Log format %s not supported - using default fmt", logFormat) // } // } func vaultSecretsMutator(ctx context.Context, obj metav1.Object) (bool, error) { req := whcontext.GetAdmissionRequest(ctx) var pod *corev1.Pod switch v := obj.(type) { case *corev1.Pod: klog.InfoS("found pod to mutate", "pod", klog.KRef(req.Namespace, req.Name)) pod = v default: return false, nil } podsInspectedCounter.Inc() err := mutatePodSpec(pod, req.Namespace, req.UID) if err != nil { klog.ErrorS(err, "failed to mutate", "pod", klog.KRef(req.Namespace, req.Name)) podsMutatedFailedCounter.Inc() } return false, err } func handlerFor(config mutating.WebhookConfig, mutator mutating.MutatorFunc, recorder metrics.Recorder, logger internalLog.Logger) http.Handler { webhook, err := mutating.NewWebhook(config, mutator, nil, nil, logger) if err != nil { klog.ErrorS(err, "error creating webhook") os.Exit(1) } handler, err := whhttp.HandlerFor(webhook) if err != nil { klog.ErrorS(err, "error creating webhook") os.Exit(1) } return handler } func authHandler(w http.ResponseWriter, r *http.Request) { if r.Method == "GET" { vars := mux.Vars(r) pod := podData{ name: vars["pod"], namespace: vars["namespace"], } if pod.name == "" || pod.namespace == "" { klog.InfoS("failed to parse url parameters", "pod", pod.name, "namespace", pod.namespace) http.Error(w, "", http.StatusBadRequest) return } err := authorize(config.kubeClient, pod) if err != nil { klog.ErrorS(err, "failed to authorize request", "pod", pod.name, "namespace", pod.namespace) http.Error(w, "", http.StatusForbidden) return } w.Header().Set("Content-Type", "application/json; charset=UTF-8") w.WriteHeader(http.StatusOK) if err := json.NewEncoder(w).Encode(config.credentials); err != nil { klog.ErrorS(err, "failed to json encode token", "pod", pod.name, "namespace", pod.namespace) http.Error(w, err.Error(), http.StatusInternalServerError) } else { klog.InfoS("served oauth token", "pod", pod.name, "namespace", pod.namespace) } } else { klog.InfoS("invalid request method") http.Error(w, "Invalid request method", http.StatusMethodNotAllowed) } } func healthHandler(w http.ResponseWriter, r *http.Request) { if r.Method 
== "GET" {
else { http.Error(w, "Invalid request method", http.StatusMethodNotAllowed) } } func initConfig() { viper.SetDefault("azurekeyvault_env_image", "spvest/azure-keyvault-env:latest") viper.SetDefault("docker_image_inspection_timeout", 20) viper.SetDefault("docker_image_inspection_use_acs_credentials", true) viper.SetDefault("auth_type", "cloudConfig") viper.SetDefault("use_auth_service", true) viper.SetDefault("metrics_enabled", false) viper.SetDefault("port_http", "80") viper.SetDefault("port", "443") viper.SetDefault("webhook_auth_service_port", "8443") viper.SetDefault("webhook_auth_service_port_internal", "8443") viper.SetDefault("env_injector_exec_dir", "/azure-keyvault/") viper.AutomaticEnv() } func init() { flag.StringVar(&params.version, "version", "", "Version of this component.") flag.StringVar(&params.versionEnvImage, "versionenvimage", "", "Version of the env image component.") flag.StringVar(&params.kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.") flag.StringVar(&params.masterURL, "master", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.") flag.StringVar(&params.cloudConfig, "cloudconfig", "/etc/kubernetes/azure.json", "Path to cloud config. Only required if this is not at default location /etc/kubernetes/azure.json") flag.StringVar(&params.logFormat, "logging-format", "text", "Log format - text or json.") } func main() { klog.InitFlags(nil) defer klog.Flush() flag.Parse() initConfig() if params.logFormat == "json" { klog.SetLogger(jsonlogs.JSONLogger) } akv2k8s.Version = params.version // logFormat := viper.GetString("log_format") // setLogFormat(logFormat) akv2k8s.LogVersion() config = azureKeyVaultConfig{ port: viper.GetString("port"), httpPort: viper.GetString("port_http"), authType: viper.GetString("auth_type"), serveMetrics: viper.GetBool("metrics_enabled"), tlsCertFile: fmt.Sprintf("%s/%s", viper.GetString("tls_cert_dir"), "tls.crt"), tlsKeyFile: fmt.Sprintf("%s/%s", viper.GetString("tls_cert_dir"), "tls.key"), useAuthService: viper.GetBool("use_auth_service"), authServiceName: viper.GetString("webhook_auth_service"), authServicePort: viper.GetString("webhook_auth_service_port"), authServicePortInternal: viper.GetString("webhook_auth_service_port_internal"), dockerImageInspectionTimeout: viper.GetInt("docker_image_inspection_timeout"), useAksCredentialsWithAcr: viper.GetBool("docker_image_inspection_use_acs_credentials"), injectorDir: viper.GetString("env_injector_exec_dir"), versionEnvImage: params.versionEnvImage, cloudConfig: params.cloudConfig, } activeSettings := []interface{}{ "webhookPort", config.port, "serveMetrics", config.serveMetrics, "authType", config.authType, "useAuthService", config.useAuthService, "useAksCredsWithAcr", config.useAksCredentialsWithAcr, "dockerInspectionTimeout", config.dockerImageInspectionTimeout, "cloudConfigPath", config.cloudConfig, } if config.useAuthService { activeSettings = append(activeSettings, "authServiceName", config.authServiceName, "authServicePort", config.authServicePort, "authServiceInternalPort", config.authServicePortInternal) } klog.InfoS("active settings", activeSettings...) 
mutator := mutating.MutatorFunc(vaultSecretsMutator) metricsRecorder := metrics.NewPrometheus(prometheus.DefaultRegisterer) logLevel := flag.Lookup("v").Value.String() klogLevel, err := strconv.Atoi(logLevel) if err != nil { klog.ErrorS(err, "failed to parse log level") klogLevel = 2 } internalLogger := &internalLog.Std{Debug: klogLevel >= 4} podHandler := handlerFor(mutating.WebhookConfig{Name: "azurekeyvault-secrets-pods", Obj: &corev1.Pod{}}, mutator, metricsRecorder, internalLogger) if config.useAuthService { caCertDir := viper.GetString("ca_cert_dir") if caCertDir == "" { klog.InfoS("missing env var - must exist to use auth service
w.WriteHeader(http.StatusOK) }
conditional_block
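The main.go rows above configure the webhook through initConfig: viper defaults first, then AutomaticEnv so environment variables override them. Here is a small runnable sketch of that pattern; it uses github.com/spf13/viper and only a subset of the keys, chosen for illustration.

package main

import (
	"fmt"

	"github.com/spf13/viper"
)

func initConfig() {
	viper.SetDefault("auth_type", "cloudConfig")
	viper.SetDefault("use_auth_service", true)
	viper.SetDefault("metrics_enabled", false)
	viper.SetDefault("port_http", "80")
	viper.SetDefault("port", "443")
	// With AutomaticEnv, every viper.Get* call first checks the environment
	// (e.g. PORT=8443) and falls back to the defaults above.
	viper.AutomaticEnv()
}

func main() {
	initConfig()
	fmt.Println("port:", viper.GetString("port"))
	fmt.Println("metrics enabled:", viper.GetBool("metrics_enabled"))
}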
mod.rs
_abi::proto::oak::application::{ node_configuration::ConfigType, ApplicationConfiguration, CryptoConfiguration, LogConfiguration, NodeConfiguration, }; use std::net::AddrParseError; use tokio::sync::oneshot; mod crypto; pub mod grpc; pub mod http; mod invocation; mod logger; mod roughtime; mod storage; mod wasm; /// Trait encapsulating execution of a Node or pseudo-Node. pub trait Node: Send { /// Returns a name for this type of Node. fn node_type(&self) -> &'static str; /// Returns a value indicating the isolation of a Node. If a Node is sandboxed (e.g. a Wasm /// node), the sandbox restricts external communcations. Uncontrolled nodes (e.g pseudo Nodes /// that are part of the runtime) have no restrictions enforced on external communications. /// /// Unless a node uses a trusted sandbox to restrict communications this function should always /// return [`NodeIsolation::Uncontrolled`] fn isolation(&self) -> NodeIsolation { NodeIsolation::Uncontrolled } /// Executes the Node, using the provided `Runtime` reference and initial handle. The method /// should continue execution until the Node terminates. /// /// `notify_receiver` receives a notification from the Runtime upon termination. This /// notification can be used by the Node to gracefully shut down. fn run( self: Box<Self>, runtime: RuntimeProxy, handle: oak_abi::Handle, notify_receiver: oneshot::Receiver<()>, ); } /// Indication of the level of isolation of a node. #[derive(Debug)] pub enum NodeIsolation { Sandboxed, Uncontrolled, } /// A enumeration for errors occurring when creating a new [`Node`] instance. // TODO(#1027): Improve or delete this enum. #[derive(Debug)] pub enum ConfigurationError { AddressParsingError(AddrParseError), IncorrectPort, IncorrectURI, NoHostElement, IncorrectWebAssemblyModuleName, InvalidNodeConfiguration, WasmiModuleInializationError(wasmi::Error), NodeCreationNotPermitted, } impl From<AddrParseError> for ConfigurationError { fn from(error: AddrParseError) -> Self { ConfigurationError::AddressParsingError(error) } } impl std::fmt::Display for ConfigurationError { fn fmt(&self, f: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> { match self { ConfigurationError::AddressParsingError(e) => { write!(f, "Failed to parse an address: {}", e) } ConfigurationError::IncorrectPort => write!(f, "Incorrect port (must be > 1023)"), ConfigurationError::IncorrectURI => write!(f, "Incorrect URI"), ConfigurationError::NoHostElement => write!(f, "URI doesn't contain the Host element"), ConfigurationError::IncorrectWebAssemblyModuleName => { write!(f, "Incorrect WebAssembly module name") } ConfigurationError::InvalidNodeConfiguration => write!(f, "Invalid NodeConfiguration"), ConfigurationError::WasmiModuleInializationError(e) => { write!(f, "Failed to initialize wasmi::Module: {}", e) } ConfigurationError::NodeCreationNotPermitted => { write!(f, "Node creation not permitted") } } } } /// Implementation of [`NodeFactory`] for server-like Oak applications running on cloud /// environments with WebAssembly support. 
pub struct ServerNodeFactory { pub application_configuration: ApplicationConfiguration, pub permissions_configuration: PermissionsConfiguration, pub secure_server_configuration: SecureServerConfiguration, pub signature_table: SignatureTable, pub kms_credentials: Option<std::path::PathBuf>, } impl NodeFactory<NodeConfiguration> for ServerNodeFactory { fn create_node( &self, node_name: &str, node_configuration: &NodeConfiguration, ) -> Result<CreatedNode, ConfigurationError> { if !self .permissions_configuration .allowed_creation(node_configuration) // TODO(#1027): Use anyhow or an improved ConfigurationError .map_err(|_| ConfigurationError::InvalidNodeConfiguration)? { return Err(ConfigurationError::NodeCreationNotPermitted); } match &node_configuration.config_type { Some(ConfigType::CryptoConfig(CryptoConfiguration {})) => Ok(CreatedNode { instance: Box::new(crypto::CryptoNode::new( node_name, self.kms_credentials.clone(), )), // TODO(#1842): sort out IFC interactions so that the crypto pseudo-Node can receive // labelled plaintext data and emit unlabelled encrypted data (which would probably // mean top_privilege() goes here). privilege: NodePrivilege::default(), }), Some(ConfigType::LogConfig(LogConfiguration {})) => Ok(CreatedNode { instance: Box::new(logger::LogNode::new(node_name)), // Allow the logger Node to declassify log messages in debug builds only. #[cfg(feature = "oak-unsafe")] privilege: NodePrivilege::top_privilege(), // The logger must not have any declassification privilege in non-debug builds. #[cfg(not(feature = "oak-unsafe"))] privilege: NodePrivilege::default(), }), Some(ConfigType::GrpcServerConfig(config)) => { let grpc_configuration = self .secure_server_configuration .grpc_config .clone() .expect("no gRPC identity provided to Oak Runtime"); Ok(CreatedNode { instance: Box::new(grpc::server::GrpcServerNode::new( node_name, config.clone(), grpc_configuration .grpc_server_tls_identity .as_ref() .expect("no gRPC server TLS identity provided to Oak Runtime") .clone(), grpc_configuration.oidc_client_info.clone(), )?), // This node needs to have `top` privilege to be able to declassify data tagged // with any arbitrary user identities. // TODO(#1631): When we have a separate top for each sub-lattice, this should be // changed to the top of the identity sub-lattice. privilege: NodePrivilege::top_privilege(), }) } Some(ConfigType::WasmConfig(config)) =>
Some(ConfigType::GrpcClientConfig(config)) => { let grpc_client_root_tls_certificate = self .secure_server_configuration .clone() .grpc_config .expect("no gRPC identity provided to Oak Runtime") .grpc_client_root_tls_certificate .expect("no root TLS certificate provided to Oak Runtime"); let uri = config.uri.parse().map_err(|err| { warn!("could not parse URI {}: {:?}", config.uri, err); ConfigurationError::IncorrectURI })?; Ok(CreatedNode { instance: Box::new(grpc::client::GrpcClientNode::new( node_name, &uri, grpc_client_root_tls_certificate, )?), privilege: grpc::client::get_privilege(&uri), }) } Some(ConfigType::RoughtimeClientConfig(config)) => Ok(CreatedNode { instance: Box::new(roughtime::RoughtimeClientNode::new(node_name, config)), privilege: NodePrivilege::default(), }), Some(ConfigType::StorageConfig(_config)) => Ok(CreatedNode { instance: Box::new(storage::StorageNode::new(node_name)), privilege: NodePrivilege::default(), }), Some(ConfigType::HttpServerConfig(config)) => { let tls_config = self .secure_server_configuration .http_config .clone() .expect("no TLS configuration for HTTP servers provided to Oak Runtime") .tls_config; Ok(CreatedNode { instance: Box::new(http::server::HttpServerNode::new( node_name, config.clone(), tls_config, )?), // This node needs to have `top` privilege to be able to declassify data tagged // with any arbitrary user identities. // TODO(#1631): When we have a separate top for each sub-lattice, this should be // changed to the top of the `identity` sub-lattice. privilege: NodePrivilege::top_privilege(), }) } Some(ConfigType::HttpClientConfig(config)) => { let http_client_root_tls_certificate = self .secure_server_configuration .http_config .clone() .expect("no HTTP configuration provided to Oak Runtime") .http_client_root_tls_certificate .expect("no root TLS certificate provided to Oak Runtime"); Ok(CreatedNode { instance: Box::new(http::client::HttpClientNode::new( node_name, config.clone(), http_client_root_tls_certificate, )?), privilege: http::client::get_privilege(&config.authority), }) } None => Err(ConfigurationError::InvalidNodeConfiguration),
{
    let wasm_module_bytes = self
        .application_configuration
        .wasm_modules
        .get(&config.wasm_module_name)
        .ok_or(ConfigurationError::IncorrectWebAssemblyModuleName)?;
    Ok(CreatedNode {
        instance: Box::new(wasm::WasmNode::new(
            node_name,
            wasm_module_bytes,
            config.clone(),
        )?),
        privilege: wasm::get_privilege(wasm_module_bytes, &self.signature_table),
    })
}
conditional_block
mod.rs
oak_abi::proto::oak::application::{ node_configuration::ConfigType, ApplicationConfiguration, CryptoConfiguration, LogConfiguration, NodeConfiguration, }; use std::net::AddrParseError; use tokio::sync::oneshot; mod crypto; pub mod grpc; pub mod http; mod invocation; mod logger; mod roughtime; mod storage; mod wasm; /// Trait encapsulating execution of a Node or pseudo-Node. pub trait Node: Send { /// Returns a name for this type of Node. fn node_type(&self) -> &'static str; /// Returns a value indicating the isolation of a Node. If a Node is sandboxed (e.g. a Wasm /// node), the sandbox restricts external communcations. Uncontrolled nodes (e.g pseudo Nodes /// that are part of the runtime) have no restrictions enforced on external communications. /// /// Unless a node uses a trusted sandbox to restrict communications this function should always /// return [`NodeIsolation::Uncontrolled`] fn isolation(&self) -> NodeIsolation { NodeIsolation::Uncontrolled } /// Executes the Node, using the provided `Runtime` reference and initial handle. The method /// should continue execution until the Node terminates. /// /// `notify_receiver` receives a notification from the Runtime upon termination. This /// notification can be used by the Node to gracefully shut down. fn run( self: Box<Self>, runtime: RuntimeProxy, handle: oak_abi::Handle, notify_receiver: oneshot::Receiver<()>, ); } /// Indication of the level of isolation of a node. #[derive(Debug)] pub enum NodeIsolation { Sandboxed, Uncontrolled, } /// A enumeration for errors occurring when creating a new [`Node`] instance. // TODO(#1027): Improve or delete this enum. #[derive(Debug)] pub enum ConfigurationError { AddressParsingError(AddrParseError), IncorrectPort, IncorrectURI, NoHostElement, IncorrectWebAssemblyModuleName, InvalidNodeConfiguration, WasmiModuleInializationError(wasmi::Error), NodeCreationNotPermitted, } impl From<AddrParseError> for ConfigurationError { fn from(error: AddrParseError) -> Self { ConfigurationError::AddressParsingError(error) } } impl std::fmt::Display for ConfigurationError { fn fmt(&self, f: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> { match self { ConfigurationError::AddressParsingError(e) => { write!(f, "Failed to parse an address: {}", e) } ConfigurationError::IncorrectPort => write!(f, "Incorrect port (must be > 1023)"), ConfigurationError::IncorrectURI => write!(f, "Incorrect URI"), ConfigurationError::NoHostElement => write!(f, "URI doesn't contain the Host element"), ConfigurationError::IncorrectWebAssemblyModuleName => { write!(f, "Incorrect WebAssembly module name") } ConfigurationError::InvalidNodeConfiguration => write!(f, "Invalid NodeConfiguration"), ConfigurationError::WasmiModuleInializationError(e) => { write!(f, "Failed to initialize wasmi::Module: {}", e) } ConfigurationError::NodeCreationNotPermitted => { write!(f, "Node creation not permitted") } } } } /// Implementation of [`NodeFactory`] for server-like Oak applications running on cloud /// environments with WebAssembly support. 
pub struct ServerNodeFactory { pub application_configuration: ApplicationConfiguration, pub permissions_configuration: PermissionsConfiguration, pub secure_server_configuration: SecureServerConfiguration, pub signature_table: SignatureTable, pub kms_credentials: Option<std::path::PathBuf>, } impl NodeFactory<NodeConfiguration> for ServerNodeFactory { fn create_node( &self, node_name: &str, node_configuration: &NodeConfiguration, ) -> Result<CreatedNode, ConfigurationError> { if !self .permissions_configuration .allowed_creation(node_configuration) // TODO(#1027): Use anyhow or an improved ConfigurationError .map_err(|_| ConfigurationError::InvalidNodeConfiguration)? { return Err(ConfigurationError::NodeCreationNotPermitted); } match &node_configuration.config_type { Some(ConfigType::CryptoConfig(CryptoConfiguration {})) => Ok(CreatedNode { instance: Box::new(crypto::CryptoNode::new( node_name, self.kms_credentials.clone(), )), // TODO(#1842): sort out IFC interactions so that the crypto pseudo-Node can receive // labelled plaintext data and emit unlabelled encrypted data (which would probably // mean top_privilege() goes here). privilege: NodePrivilege::default(), }), Some(ConfigType::LogConfig(LogConfiguration {})) => Ok(CreatedNode { instance: Box::new(logger::LogNode::new(node_name)), // Allow the logger Node to declassify log messages in debug builds only. #[cfg(feature = "oak-unsafe")] privilege: NodePrivilege::top_privilege(), // The logger must not have any declassification privilege in non-debug builds. #[cfg(not(feature = "oak-unsafe"))] privilege: NodePrivilege::default(), }), Some(ConfigType::GrpcServerConfig(config)) => { let grpc_configuration = self .secure_server_configuration .grpc_config .clone() .expect("no gRPC identity provided to Oak Runtime"); Ok(CreatedNode { instance: Box::new(grpc::server::GrpcServerNode::new( node_name, config.clone(), grpc_configuration .grpc_server_tls_identity .as_ref() .expect("no gRPC server TLS identity provided to Oak Runtime") .clone(), grpc_configuration.oidc_client_info.clone(), )?), // This node needs to have `top` privilege to be able to declassify data tagged // with any arbitrary user identities. // TODO(#1631): When we have a separate top for each sub-lattice, this should be // changed to the top of the identity sub-lattice. privilege: NodePrivilege::top_privilege(), }) } Some(ConfigType::WasmConfig(config)) => { let wasm_module_bytes = self .application_configuration .wasm_modules .get(&config.wasm_module_name) .ok_or(ConfigurationError::IncorrectWebAssemblyModuleName)?; Ok(CreatedNode { instance: Box::new(wasm::WasmNode::new( node_name, wasm_module_bytes, config.clone(), )?), privilege: wasm::get_privilege(wasm_module_bytes, &self.signature_table), }) } Some(ConfigType::GrpcClientConfig(config)) => { let grpc_client_root_tls_certificate = self .secure_server_configuration .clone() .grpc_config .expect("no gRPC identity provided to Oak Runtime") .grpc_client_root_tls_certificate .expect("no root TLS certificate provided to Oak Runtime"); let uri = config.uri.parse().map_err(|err| { warn!("could not parse URI {}: {:?}", config.uri, err); ConfigurationError::IncorrectURI })?; Ok(CreatedNode { instance: Box::new(grpc::client::GrpcClientNode::new( node_name, &uri, grpc_client_root_tls_certificate, )?), privilege: grpc::client::get_privilege(&uri),
privilege: NodePrivilege::default(), }), Some(ConfigType::StorageConfig(_config)) => Ok(CreatedNode { instance: Box::new(storage::StorageNode::new(node_name)), privilege: NodePrivilege::default(), }), Some(ConfigType::HttpServerConfig(config)) => { let tls_config = self .secure_server_configuration .http_config .clone() .expect("no TLS configuration for HTTP servers provided to Oak Runtime") .tls_config; Ok(CreatedNode { instance: Box::new(http::server::HttpServerNode::new( node_name, config.clone(), tls_config, )?), // This node needs to have `top` privilege to be able to declassify data tagged // with any arbitrary user identities. // TODO(#1631): When we have a separate top for each sub-lattice, this should be // changed to the top of the `identity` sub-lattice. privilege: NodePrivilege::top_privilege(), }) } Some(ConfigType::HttpClientConfig(config)) => { let http_client_root_tls_certificate = self .secure_server_configuration .http_config .clone() .expect("no HTTP configuration provided to Oak Runtime") .http_client_root_tls_certificate .expect("no root TLS certificate provided to Oak Runtime"); Ok(CreatedNode { instance: Box::new(http::client::HttpClientNode::new( node_name, config.clone(), http_client_root_tls_certificate, )?), privilege: http::client::get_privilege(&config.authority), }) } None => Err(ConfigurationError::InvalidNodeConfiguration), }
})
}
Some(ConfigType::RoughtimeClientConfig(config)) => Ok(CreatedNode {
    instance: Box::new(roughtime::RoughtimeClientNode::new(node_name, config)),
random_line_split
mod.rs
_abi::proto::oak::application::{ node_configuration::ConfigType, ApplicationConfiguration, CryptoConfiguration, LogConfiguration, NodeConfiguration, }; use std::net::AddrParseError; use tokio::sync::oneshot; mod crypto; pub mod grpc; pub mod http; mod invocation; mod logger; mod roughtime; mod storage; mod wasm; /// Trait encapsulating execution of a Node or pseudo-Node. pub trait Node: Send { /// Returns a name for this type of Node. fn node_type(&self) -> &'static str; /// Returns a value indicating the isolation of a Node. If a Node is sandboxed (e.g. a Wasm /// node), the sandbox restricts external communcations. Uncontrolled nodes (e.g pseudo Nodes /// that are part of the runtime) have no restrictions enforced on external communications. /// /// Unless a node uses a trusted sandbox to restrict communications this function should always /// return [`NodeIsolation::Uncontrolled`] fn isolation(&self) -> NodeIsolation { NodeIsolation::Uncontrolled } /// Executes the Node, using the provided `Runtime` reference and initial handle. The method /// should continue execution until the Node terminates. /// /// `notify_receiver` receives a notification from the Runtime upon termination. This /// notification can be used by the Node to gracefully shut down. fn run( self: Box<Self>, runtime: RuntimeProxy, handle: oak_abi::Handle, notify_receiver: oneshot::Receiver<()>, ); } /// Indication of the level of isolation of a node. #[derive(Debug)] pub enum NodeIsolation { Sandboxed, Uncontrolled, } /// A enumeration for errors occurring when creating a new [`Node`] instance. // TODO(#1027): Improve or delete this enum. #[derive(Debug)] pub enum ConfigurationError { AddressParsingError(AddrParseError), IncorrectPort, IncorrectURI, NoHostElement, IncorrectWebAssemblyModuleName, InvalidNodeConfiguration, WasmiModuleInializationError(wasmi::Error), NodeCreationNotPermitted, } impl From<AddrParseError> for ConfigurationError { fn from(error: AddrParseError) -> Self { ConfigurationError::AddressParsingError(error) } } impl std::fmt::Display for ConfigurationError { fn fmt(&self, f: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> { match self { ConfigurationError::AddressParsingError(e) => { write!(f, "Failed to parse an address: {}", e) } ConfigurationError::IncorrectPort => write!(f, "Incorrect port (must be > 1023)"), ConfigurationError::IncorrectURI => write!(f, "Incorrect URI"), ConfigurationError::NoHostElement => write!(f, "URI doesn't contain the Host element"), ConfigurationError::IncorrectWebAssemblyModuleName => { write!(f, "Incorrect WebAssembly module name") } ConfigurationError::InvalidNodeConfiguration => write!(f, "Invalid NodeConfiguration"), ConfigurationError::WasmiModuleInializationError(e) => { write!(f, "Failed to initialize wasmi::Module: {}", e) } ConfigurationError::NodeCreationNotPermitted => { write!(f, "Node creation not permitted") } } } } /// Implementation of [`NodeFactory`] for server-like Oak applications running on cloud /// environments with WebAssembly support. pub struct
{ pub application_configuration: ApplicationConfiguration, pub permissions_configuration: PermissionsConfiguration, pub secure_server_configuration: SecureServerConfiguration, pub signature_table: SignatureTable, pub kms_credentials: Option<std::path::PathBuf>, } impl NodeFactory<NodeConfiguration> for ServerNodeFactory { fn create_node( &self, node_name: &str, node_configuration: &NodeConfiguration, ) -> Result<CreatedNode, ConfigurationError> { if !self .permissions_configuration .allowed_creation(node_configuration) // TODO(#1027): Use anyhow or an improved ConfigurationError .map_err(|_| ConfigurationError::InvalidNodeConfiguration)? { return Err(ConfigurationError::NodeCreationNotPermitted); } match &node_configuration.config_type { Some(ConfigType::CryptoConfig(CryptoConfiguration {})) => Ok(CreatedNode { instance: Box::new(crypto::CryptoNode::new( node_name, self.kms_credentials.clone(), )), // TODO(#1842): sort out IFC interactions so that the crypto pseudo-Node can receive // labelled plaintext data and emit unlabelled encrypted data (which would probably // mean top_privilege() goes here). privilege: NodePrivilege::default(), }), Some(ConfigType::LogConfig(LogConfiguration {})) => Ok(CreatedNode { instance: Box::new(logger::LogNode::new(node_name)), // Allow the logger Node to declassify log messages in debug builds only. #[cfg(feature = "oak-unsafe")] privilege: NodePrivilege::top_privilege(), // The logger must not have any declassification privilege in non-debug builds. #[cfg(not(feature = "oak-unsafe"))] privilege: NodePrivilege::default(), }), Some(ConfigType::GrpcServerConfig(config)) => { let grpc_configuration = self .secure_server_configuration .grpc_config .clone() .expect("no gRPC identity provided to Oak Runtime"); Ok(CreatedNode { instance: Box::new(grpc::server::GrpcServerNode::new( node_name, config.clone(), grpc_configuration .grpc_server_tls_identity .as_ref() .expect("no gRPC server TLS identity provided to Oak Runtime") .clone(), grpc_configuration.oidc_client_info.clone(), )?), // This node needs to have `top` privilege to be able to declassify data tagged // with any arbitrary user identities. // TODO(#1631): When we have a separate top for each sub-lattice, this should be // changed to the top of the identity sub-lattice. 
privilege: NodePrivilege::top_privilege(), }) } Some(ConfigType::WasmConfig(config)) => { let wasm_module_bytes = self .application_configuration .wasm_modules .get(&config.wasm_module_name) .ok_or(ConfigurationError::IncorrectWebAssemblyModuleName)?; Ok(CreatedNode { instance: Box::new(wasm::WasmNode::new( node_name, wasm_module_bytes, config.clone(), )?), privilege: wasm::get_privilege(wasm_module_bytes, &self.signature_table), }) } Some(ConfigType::GrpcClientConfig(config)) => { let grpc_client_root_tls_certificate = self .secure_server_configuration .clone() .grpc_config .expect("no gRPC identity provided to Oak Runtime") .grpc_client_root_tls_certificate .expect("no root TLS certificate provided to Oak Runtime"); let uri = config.uri.parse().map_err(|err| { warn!("could not parse URI {}: {:?}", config.uri, err); ConfigurationError::IncorrectURI })?; Ok(CreatedNode { instance: Box::new(grpc::client::GrpcClientNode::new( node_name, &uri, grpc_client_root_tls_certificate, )?), privilege: grpc::client::get_privilege(&uri), }) } Some(ConfigType::RoughtimeClientConfig(config)) => Ok(CreatedNode { instance: Box::new(roughtime::RoughtimeClientNode::new(node_name, config)), privilege: NodePrivilege::default(), }), Some(ConfigType::StorageConfig(_config)) => Ok(CreatedNode { instance: Box::new(storage::StorageNode::new(node_name)), privilege: NodePrivilege::default(), }), Some(ConfigType::HttpServerConfig(config)) => { let tls_config = self .secure_server_configuration .http_config .clone() .expect("no TLS configuration for HTTP servers provided to Oak Runtime") .tls_config; Ok(CreatedNode { instance: Box::new(http::server::HttpServerNode::new( node_name, config.clone(), tls_config, )?), // This node needs to have `top` privilege to be able to declassify data tagged // with any arbitrary user identities. // TODO(#1631): When we have a separate top for each sub-lattice, this should be // changed to the top of the `identity` sub-lattice. privilege: NodePrivilege::top_privilege(), }) } Some(ConfigType::HttpClientConfig(config)) => { let http_client_root_tls_certificate = self .secure_server_configuration .http_config .clone() .expect("no HTTP configuration provided to Oak Runtime") .http_client_root_tls_certificate .expect("no root TLS certificate provided to Oak Runtime"); Ok(CreatedNode { instance: Box::new(http::client::HttpClientNode::new( node_name, config.clone(), http_client_root_tls_certificate, )?), privilege: http::client::get_privilege(&config.authority), }) } None => Err(ConfigurationError::InvalidNodeConfiguration), }
ServerNodeFactory
identifier_name
mod.rs
_abi::proto::oak::application::{ node_configuration::ConfigType, ApplicationConfiguration, CryptoConfiguration, LogConfiguration, NodeConfiguration, }; use std::net::AddrParseError; use tokio::sync::oneshot; mod crypto; pub mod grpc; pub mod http; mod invocation; mod logger; mod roughtime; mod storage; mod wasm; /// Trait encapsulating execution of a Node or pseudo-Node. pub trait Node: Send { /// Returns a name for this type of Node. fn node_type(&self) -> &'static str; /// Returns a value indicating the isolation of a Node. If a Node is sandboxed (e.g. a Wasm /// node), the sandbox restricts external communcations. Uncontrolled nodes (e.g pseudo Nodes /// that are part of the runtime) have no restrictions enforced on external communications. /// /// Unless a node uses a trusted sandbox to restrict communications this function should always /// return [`NodeIsolation::Uncontrolled`] fn isolation(&self) -> NodeIsolation { NodeIsolation::Uncontrolled } /// Executes the Node, using the provided `Runtime` reference and initial handle. The method /// should continue execution until the Node terminates. /// /// `notify_receiver` receives a notification from the Runtime upon termination. This /// notification can be used by the Node to gracefully shut down. fn run( self: Box<Self>, runtime: RuntimeProxy, handle: oak_abi::Handle, notify_receiver: oneshot::Receiver<()>, ); } /// Indication of the level of isolation of a node. #[derive(Debug)] pub enum NodeIsolation { Sandboxed, Uncontrolled, } /// A enumeration for errors occurring when creating a new [`Node`] instance. // TODO(#1027): Improve or delete this enum. #[derive(Debug)] pub enum ConfigurationError { AddressParsingError(AddrParseError), IncorrectPort, IncorrectURI, NoHostElement, IncorrectWebAssemblyModuleName, InvalidNodeConfiguration, WasmiModuleInializationError(wasmi::Error), NodeCreationNotPermitted, } impl From<AddrParseError> for ConfigurationError { fn from(error: AddrParseError) -> Self { ConfigurationError::AddressParsingError(error) } } impl std::fmt::Display for ConfigurationError { fn fmt(&self, f: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> { match self { ConfigurationError::AddressParsingError(e) => { write!(f, "Failed to parse an address: {}", e) } ConfigurationError::IncorrectPort => write!(f, "Incorrect port (must be > 1023)"), ConfigurationError::IncorrectURI => write!(f, "Incorrect URI"), ConfigurationError::NoHostElement => write!(f, "URI doesn't contain the Host element"), ConfigurationError::IncorrectWebAssemblyModuleName => { write!(f, "Incorrect WebAssembly module name") } ConfigurationError::InvalidNodeConfiguration => write!(f, "Invalid NodeConfiguration"), ConfigurationError::WasmiModuleInializationError(e) => { write!(f, "Failed to initialize wasmi::Module: {}", e) } ConfigurationError::NodeCreationNotPermitted => { write!(f, "Node creation not permitted") } } } } /// Implementation of [`NodeFactory`] for server-like Oak applications running on cloud /// environments with WebAssembly support. pub struct ServerNodeFactory { pub application_configuration: ApplicationConfiguration, pub permissions_configuration: PermissionsConfiguration, pub secure_server_configuration: SecureServerConfiguration, pub signature_table: SignatureTable, pub kms_credentials: Option<std::path::PathBuf>, } impl NodeFactory<NodeConfiguration> for ServerNodeFactory { fn create_node( &self, node_name: &str, node_configuration: &NodeConfiguration, ) -> Result<CreatedNode, ConfigurationError>
}), Some(ConfigType::LogConfig(LogConfiguration {})) => Ok(CreatedNode { instance: Box::new(logger::LogNode::new(node_name)), // Allow the logger Node to declassify log messages in debug builds only. #[cfg(feature = "oak-unsafe")] privilege: NodePrivilege::top_privilege(), // The logger must not have any declassification privilege in non-debug builds. #[cfg(not(feature = "oak-unsafe"))] privilege: NodePrivilege::default(), }), Some(ConfigType::GrpcServerConfig(config)) => { let grpc_configuration = self .secure_server_configuration .grpc_config .clone() .expect("no gRPC identity provided to Oak Runtime"); Ok(CreatedNode { instance: Box::new(grpc::server::GrpcServerNode::new( node_name, config.clone(), grpc_configuration .grpc_server_tls_identity .as_ref() .expect("no gRPC server TLS identity provided to Oak Runtime") .clone(), grpc_configuration.oidc_client_info.clone(), )?), // This node needs to have `top` privilege to be able to declassify data tagged // with any arbitrary user identities. // TODO(#1631): When we have a separate top for each sub-lattice, this should be // changed to the top of the identity sub-lattice. privilege: NodePrivilege::top_privilege(), }) } Some(ConfigType::WasmConfig(config)) => { let wasm_module_bytes = self .application_configuration .wasm_modules .get(&config.wasm_module_name) .ok_or(ConfigurationError::IncorrectWebAssemblyModuleName)?; Ok(CreatedNode { instance: Box::new(wasm::WasmNode::new( node_name, wasm_module_bytes, config.clone(), )?), privilege: wasm::get_privilege(wasm_module_bytes, &self.signature_table), }) } Some(ConfigType::GrpcClientConfig(config)) => { let grpc_client_root_tls_certificate = self .secure_server_configuration .clone() .grpc_config .expect("no gRPC identity provided to Oak Runtime") .grpc_client_root_tls_certificate .expect("no root TLS certificate provided to Oak Runtime"); let uri = config.uri.parse().map_err(|err| { warn!("could not parse URI {}: {:?}", config.uri, err); ConfigurationError::IncorrectURI })?; Ok(CreatedNode { instance: Box::new(grpc::client::GrpcClientNode::new( node_name, &uri, grpc_client_root_tls_certificate, )?), privilege: grpc::client::get_privilege(&uri), }) } Some(ConfigType::RoughtimeClientConfig(config)) => Ok(CreatedNode { instance: Box::new(roughtime::RoughtimeClientNode::new(node_name, config)), privilege: NodePrivilege::default(), }), Some(ConfigType::StorageConfig(_config)) => Ok(CreatedNode { instance: Box::new(storage::StorageNode::new(node_name)), privilege: NodePrivilege::default(), }), Some(ConfigType::HttpServerConfig(config)) => { let tls_config = self .secure_server_configuration .http_config .clone() .expect("no TLS configuration for HTTP servers provided to Oak Runtime") .tls_config; Ok(CreatedNode { instance: Box::new(http::server::HttpServerNode::new( node_name, config.clone(), tls_config, )?), // This node needs to have `top` privilege to be able to declassify data tagged // with any arbitrary user identities. // TODO(#1631): When we have a separate top for each sub-lattice, this should be // changed to the top of the `identity` sub-lattice. 
privilege: NodePrivilege::top_privilege(), }) } Some(ConfigType::HttpClientConfig(config)) => { let http_client_root_tls_certificate = self .secure_server_configuration .http_config .clone() .expect("no HTTP configuration provided to Oak Runtime") .http_client_root_tls_certificate .expect("no root TLS certificate provided to Oak Runtime"); Ok(CreatedNode { instance: Box::new(http::client::HttpClientNode::new( node_name, config.clone(), http_client_root_tls_certificate, )?), privilege: http::client::get_privilege(&config.authority), }) } None => Err(ConfigurationError::InvalidNodeConfiguration), }
{
    if !self
        .permissions_configuration
        .allowed_creation(node_configuration)
        // TODO(#1027): Use anyhow or an improved ConfigurationError
        .map_err(|_| ConfigurationError::InvalidNodeConfiguration)?
    {
        return Err(ConfigurationError::NodeCreationNotPermitted);
    }
    match &node_configuration.config_type {
        Some(ConfigType::CryptoConfig(CryptoConfiguration {})) => Ok(CreatedNode {
            instance: Box::new(crypto::CryptoNode::new(
                node_name,
                self.kms_credentials.clone(),
            )),
            // TODO(#1842): sort out IFC interactions so that the crypto pseudo-Node can receive
            // labelled plaintext data and emit unlabelled encrypted data (which would probably
            // mean top_privilege() goes here).
            privilege: NodePrivilege::default(),
identifier_body
cli.go
Parameters contains parameters for list command. type ListParameters struct { Filters string PageLimit int64 PageMarker string Detail bool Count bool Shared bool ExcludeHRefs bool ParentFQName string ParentType string ParentUUIDs string BackrefUUIDs string // TODO(Daniel): handle RefUUIDs ObjectUUIDs string Fields string } // Resources define output format of list command. type Resources = map[string][]map[string]interface{} // ListResources lists resources with given schemaID using filters. func (c *CLI) ListResources(schemaID string, lp *ListParameters) (string, error) { if schemaID == "" { return c.showHelp("", listHelpTemplate) } var response map[string]interface{} if _, err := c.ReadWithQuery( context.Background(), pluralPath(schemaID), queryParameters(lp), &response, ); err != nil { return "", err } var r Resources var err error switch { case lp.Count: return encodeToYAML(response) case lp.Detail: r, err = makeOutputResourcesFromDetailedResponse(schemaID, response) default: r, err = makeOutputResources(schemaID, response) } if err != nil { return "", err } return encodeToYAML(r) } const listHelpTemplate = `List command possible usages: {% for schema in schemas %}contrail list {{ schema.ID }} {% endfor %}` func pluralPath(schemaID string) string { return "/" + models.SchemaIDToKind(schemaID) + "s" } func queryParameters(lp *ListParameters) url.Values { values := url.Values{} for k, v := range map[string]string{ services.FiltersKey: lp.Filters, services.PageLimitKey: strconv.FormatInt(lp.PageLimit, 10), services.PageMarkerKey: lp.PageMarker, services.DetailKey: strconv.FormatBool(lp.Detail), services.CountKey: strconv.FormatBool(lp.Count), services.SharedKey: strconv.FormatBool(lp.Shared), services.ExcludeHRefsKey: strconv.FormatBool(lp.ExcludeHRefs), services.ParentFQNameKey: lp.ParentFQName, services.ParentTypeKey: lp.ParentType, services.ParentUUIDsKey: lp.ParentUUIDs, services.BackrefUUIDsKey: lp.BackrefUUIDs, // TODO(Daniel): handle RefUUIDs services.ObjectUUIDsKey: lp.ObjectUUIDs, services.FieldsKey: lp.Fields, } { if !isZeroValue(v) { values.Set(k, v) } } return values } func isZeroValue(value interface{}) bool { return value == "" || value == 0 || value == false } // makeOutputResourcesFromDetailedResponse creates list command output in format compatible with Sync command input // based on API Server detailed response. func makeOutputResourcesFromDetailedResponse(schemaID string, response map[string]interface{}) (Resources, error) { r := Resources{} for _, rawList := range response { list, ok := rawList.([]interface{}) if !ok { return nil, errors.Errorf("detailed response should contain list of resources: %v", rawList) } for _, rawWrappedObject := range list { wrappedObject, ok := rawWrappedObject.(map[string]interface{}) if !ok { return nil, errors.Errorf("detailed response contains invalid data: %v", rawWrappedObject) } for _, object := range wrappedObject { r[ResourcesKey] = append(r[ResourcesKey], map[string]interface{}{ KindKey: schemaID, DataKey: object, }) } } } return r, nil } // makeOutputResources creates list command output in format compatible with Sync command input // based on API Server standard response. 
func makeOutputResources(schemaID string, response map[string]interface{}) (Resources, error) { r := Resources{} for _, rawList := range response { list, ok := rawList.([]interface{}) if !ok { return nil, errors.Errorf("response should contain list of resources: %v", rawList) } for _, object := range list { r[ResourcesKey] = append(r[ResourcesKey], map[string]interface{}{ KindKey: schemaID, DataKey: object, }) } } return r, nil } // SyncResources synchronizes state of resources specified in given file. func (c *CLI) SyncResources(filePath string) (string, error) { var req syncListRequest if err := fileutil.LoadFile(filePath, &req); err != nil { return "", err } for i := range req.Resources { req.Resources[i].Data = fileutil.YAMLtoJSONCompat(req.Resources[i].Data) } var response []syncResponse if _, err := c.Create(context.Background(), "/sync", req, &response); err != nil { return "", err } return encodeToYAML(syncListResponse{Resources: response}) } // SetResourceParameter sets parameter value of resource with given schemaID na UUID. func (c *CLI) SetResourceParameter(schemaID, uuid, yamlString string) (string, error) { if schemaID == "" || uuid == "" { return c.showHelp(schemaID, setHelpTemplate) } var data map[string]interface{} if err := yaml.Unmarshal([]byte(yamlString), &data); err != nil { return "", err } data["uuid"] = uuid _, err := c.Update( context.Background(), urlPath(schemaID, uuid), map[string]interface{}{ models.SchemaIDToKind(schemaID): fileutil.YAMLtoJSONCompat(data), }, nil, ) if err != nil { return "", err } return c.ShowResource(schemaID, uuid) } const setHelpTemplate = `Set command possible usages: {% for schema in schemas %}contrail set {{ schema.ID }} $UUID $YAML {% endfor %}` // DeleteResource deletes resource with given schemaID and UUID. func (c *CLI) DeleteResource(schemaID, uuid string) (string, error) { if schemaID == "" || uuid == "" { return c.showHelp(schemaID, removeHelpTemplate) } response, err := c.EnsureDeleted(context.Background(), urlPath(schemaID, uuid), nil) if err != nil { return "", err } if response.StatusCode == http.StatusNotFound { c.log.WithField("path", urlPath(schemaID, uuid)).Debug("Not found") } return "", nil } const removeHelpTemplate = `Remove command possible usages: {% for schema in schemas %}contrail rm {{ schema.ID }} $UUID {% endfor %}` type deleteRequest struct { Kind string `json:"kind" yaml:"kind"` Data struct { UUID string `json:"uuid" yaml:"uuid"` } `json:"data" yaml:"data"` } type deleteListRequest struct { List []deleteRequest `json:"resources" yaml:"resources"` } // DeleteResources deletes multiple resources specified in given file. func (c *CLI) DeleteResources(filePath string) (string, error) { var request deleteListRequest if err := fileutil.LoadFile(filePath, &request); err != nil { return "", nil } for _, r := range request.List { if _, err := c.DeleteResource(r.Kind, r.Data.UUID); err != nil { return "", err } } return "", nil } func urlPath(schemaID, uuid string) string { return "/" + models.SchemaIDToKind(schemaID) + "/" + uuid } // ShowSchema returns schema with with given schemaID. 
func (c *CLI) ShowSchema(schemaID string) (string, error) { return c.showHelp(schemaID, schemaTemplate) } const schemaTemplate = ` {% for schema in schemas %} # {{ schema.Title }} {{ schema.Description }} - kind: {{ schema.ID }} data: {% for key, value in schema.JSONSchema.Properties %} {{ key }}: {{ value.Default }} # {{ value.Title }} ({{ value.Type }}) {% endfor %} {% endfor %}` func (c *CLI) showHelp(schemaID string, template string) (string, error) { api, err := c.fetchServerAPI(filepath.Join(c.schemaRoot, serverSchemaFile)) if err != nil { return "", err } if schemaID != "" { s := api.SchemaByID(schemaID) if s == nil { return "", errors.Errorf("schema %s not found", schemaID) } api.Schemas = []*schema.Schema{s} } tpl, err := pongo2.FromString(template) if err != nil { return "", err } o, err := tpl.Execute(pongo2.Context{"schemas": api.Schemas}) if err != nil { return "", err } return o, nil } func (c *CLI) fetchServerAPI(serverSchema string) (*schema.API, error) { var api schema.API for i := 0; i < retryMax; i++
{
		_, err := c.Read(context.Background(), serverSchema, &api)
		if err == nil {
			break
		}
		logrus.WithError(err).Warn("Failed to connect API Server - reconnecting")
		time.Sleep(time.Second)
	}
conditional_block
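The conditional block above is the body of fetchServerAPI's bounded retry loop in cli.go: it tries to read the server schema up to retryMax times, breaks out on the first success, and otherwise logs a warning and sleeps for one second before the next attempt. Below is a minimal standalone sketch of the same bounded-retry pattern; readSchema and maxAttempts are hypothetical stand-ins for illustration, not identifiers from cli.go.

package main

import (
	"errors"
	"fmt"
	"time"
)

const maxAttempts = 5 // hypothetical counterpart of retryMax

// readSchema is a hypothetical stand-in for c.Read(ctx, serverSchema, &api);
// it fails for the first couple of attempts to exercise the retry path.
func readSchema(attempt int) error {
	if attempt < 3 {
		return errors.New("connection refused")
	}
	return nil
}

func main() {
	var lastErr error
	for i := 0; i < maxAttempts; i++ {
		lastErr = readSchema(i)
		if lastErr == nil {
			break // first success ends the retry loop
		}
		fmt.Println("failed to connect, retrying:", lastErr)
		time.Sleep(time.Second)
	}
	fmt.Println("final error:", lastErr)
}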
cli.go
(*CLI, error) { return NewCLI( &HTTPConfig{ ID: viper.GetString("client.id"), Password: viper.GetString("client.password"), Endpoint: viper.GetString("client.endpoint"), AuthURL: viper.GetString("keystone.authurl"), Scope: keystone.NewScope( viper.GetString("client.domain_id"), viper.GetString("client.domain_name"), viper.GetString("client.project_id"), viper.GetString("client.project_name"), ), Insecure: viper.GetBool("insecure"), }, viper.GetString("client.schema_root"), ) } // NewCLI returns new logged in CLI Client. func NewCLI(c *HTTPConfig, schemaRoot string) (*CLI, error) { client := NewHTTP(c) if err := client.Login(context.Background()); err != nil { return nil, err } return &CLI{ HTTP: *client, schemaRoot: schemaRoot, log: logutil.NewLogger("cli"), }, nil } // ShowResource shows resource with given schemaID and UUID. func (c *CLI) ShowResource(schemaID, uuid string) (string, error) { if schemaID == "" || uuid == "" { return c.showHelp(schemaID, showHelpTemplate) } var response map[string]interface{} _, err := c.Read(context.Background(), urlPath(schemaID, uuid), &response) if err != nil { return "", err } data, ok := response[models.SchemaIDToKind(schemaID)].(map[string]interface{}) if !ok { return "", errors.Errorf( "resource in response is not a JSON object: %v", response[models.SchemaIDToKind(schemaID)], ) } return encodeToYAML(syncListResponse{Resources: []syncResponse{{Data: data, Kind: schemaID}}}) } type syncData struct { Operation string `json:"operation" yaml:"operation"` Kind string `json:"kind" yaml:"kind"` Data interface{} `json:"data" yaml:"data"` } type syncListData struct { Resources []syncData `json:"resources" yaml:"resources"` } type syncResponse = syncData type syncListResponse = syncListData type syncListRequest = syncListData const showHelpTemplate = `Show command possible usages: {% for schema in schemas %}contrail show {{ schema.ID }} $UUID {% endfor %}` // ListParameters contains parameters for list command. type ListParameters struct { Filters string PageLimit int64 PageMarker string Detail bool Count bool Shared bool ExcludeHRefs bool ParentFQName string ParentType string ParentUUIDs string BackrefUUIDs string // TODO(Daniel): handle RefUUIDs ObjectUUIDs string Fields string } // Resources define output format of list command. type Resources = map[string][]map[string]interface{} // ListResources lists resources with given schemaID using filters. 
func (c *CLI) ListResources(schemaID string, lp *ListParameters) (string, error) { if schemaID == "" { return c.showHelp("", listHelpTemplate) } var response map[string]interface{} if _, err := c.ReadWithQuery( context.Background(), pluralPath(schemaID), queryParameters(lp), &response, ); err != nil { return "", err } var r Resources var err error switch { case lp.Count: return encodeToYAML(response) case lp.Detail: r, err = makeOutputResourcesFromDetailedResponse(schemaID, response) default: r, err = makeOutputResources(schemaID, response) } if err != nil { return "", err } return encodeToYAML(r) } const listHelpTemplate = `List command possible usages: {% for schema in schemas %}contrail list {{ schema.ID }} {% endfor %}` func pluralPath(schemaID string) string { return "/" + models.SchemaIDToKind(schemaID) + "s" } func queryParameters(lp *ListParameters) url.Values { values := url.Values{} for k, v := range map[string]string{ services.FiltersKey: lp.Filters, services.PageLimitKey: strconv.FormatInt(lp.PageLimit, 10), services.PageMarkerKey: lp.PageMarker, services.DetailKey: strconv.FormatBool(lp.Detail), services.CountKey: strconv.FormatBool(lp.Count), services.SharedKey: strconv.FormatBool(lp.Shared), services.ExcludeHRefsKey: strconv.FormatBool(lp.ExcludeHRefs), services.ParentFQNameKey: lp.ParentFQName, services.ParentTypeKey: lp.ParentType, services.ParentUUIDsKey: lp.ParentUUIDs, services.BackrefUUIDsKey: lp.BackrefUUIDs, // TODO(Daniel): handle RefUUIDs services.ObjectUUIDsKey: lp.ObjectUUIDs, services.FieldsKey: lp.Fields, } { if !isZeroValue(v) { values.Set(k, v) } } return values } func isZeroValue(value interface{}) bool { return value == "" || value == 0 || value == false } // makeOutputResourcesFromDetailedResponse creates list command output in format compatible with Sync command input // based on API Server detailed response. func makeOutputResourcesFromDetailedResponse(schemaID string, response map[string]interface{}) (Resources, error) { r := Resources{} for _, rawList := range response { list, ok := rawList.([]interface{}) if !ok { return nil, errors.Errorf("detailed response should contain list of resources: %v", rawList) } for _, rawWrappedObject := range list { wrappedObject, ok := rawWrappedObject.(map[string]interface{}) if !ok { return nil, errors.Errorf("detailed response contains invalid data: %v", rawWrappedObject) } for _, object := range wrappedObject { r[ResourcesKey] = append(r[ResourcesKey], map[string]interface{}{ KindKey: schemaID, DataKey: object, }) } } } return r, nil } // makeOutputResources creates list command output in format compatible with Sync command input // based on API Server standard response. func makeOutputResources(schemaID string, response map[string]interface{}) (Resources, error)
// SyncResources synchronizes state of resources specified in given file. func (c *CLI) SyncResources(filePath string) (string, error) { var req syncListRequest if err := fileutil.LoadFile(filePath, &req); err != nil { return "", err } for i := range req.Resources { req.Resources[i].Data = fileutil.YAMLtoJSONCompat(req.Resources[i].Data) } var response []syncResponse if _, err := c.Create(context.Background(), "/sync", req, &response); err != nil { return "", err } return encodeToYAML(syncListResponse{Resources: response}) } // SetResourceParameter sets parameter value of resource with given schemaID na UUID. func (c *CLI) SetResourceParameter(schemaID, uuid, yamlString string) (string, error) { if schemaID == "" || uuid == "" { return c.showHelp(schemaID, setHelpTemplate) } var data map[string]interface{} if err := yaml.Unmarshal([]byte(yamlString), &data); err != nil { return "", err } data["uuid"] = uuid _, err := c.Update( context.Background(), urlPath(schemaID, uuid), map[string]interface{}{ models.SchemaIDToKind(schemaID): fileutil.YAMLtoJSONCompat(data), }, nil, ) if err != nil { return "", err } return c.ShowResource(schemaID, uuid) } const setHelpTemplate = `Set command possible usages: {% for schema in schemas %}contrail set {{ schema.ID }} $UUID $YAML {% endfor %}` // DeleteResource deletes resource with given schemaID and UUID. func (c *CLI) DeleteResource(schemaID, uuid string) (string, error) { if schemaID == "" || uuid == "" { return c.showHelp(schemaID, removeHelpTemplate) } response, err := c.EnsureDeleted(context.Background(), urlPath(schemaID, uuid), nil) if err != nil { return "", err } if response.StatusCode == http.StatusNotFound { c.log.WithField("path", urlPath(schemaID, uuid)).Debug("Not found") } return "", nil } const removeHelpTemplate = `Remove command possible usages: {% for schema in schemas %}contrail rm {{ schema.ID }} $UUID {% endfor %}` type deleteRequest struct { Kind string `json:"kind" yaml:"kind"` Data struct { UUID string
{
	r := Resources{}
	for _, rawList := range response {
		list, ok := rawList.([]interface{})
		if !ok {
			return nil, errors.Errorf("response should contain list of resources: %v", rawList)
		}
		for _, object := range list {
			r[ResourcesKey] = append(r[ResourcesKey], map[string]interface{}{
				KindKey: schemaID,
				DataKey: object,
			})
		}
	}
	return r, nil
}
identifier_body
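makeOutputResources, whose body appears just above, wraps every object from a standard list response into a kind/data envelope under a top-level resources key, the layout the sync command consumes. The sketch below replays that transformation on a made-up response; the lower-case key constants, the "project" kind, the sample data, and the gopkg.in/yaml.v2 import are assumptions for illustration only, not values taken from the package.

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// Assumed stand-ins for the package's key constants.
const (
	resourcesKey = "resources"
	kindKey      = "kind"
	dataKey      = "data"
)

func main() {
	// A made-up standard (non-detailed) list response for a "project" schema.
	response := map[string]interface{}{
		"projects": []interface{}{
			map[string]interface{}{"uuid": "p-1", "name": "demo"},
		},
	}

	out := map[string][]map[string]interface{}{}
	for _, rawList := range response {
		list, _ := rawList.([]interface{})
		for _, object := range list {
			out[resourcesKey] = append(out[resourcesKey], map[string]interface{}{
				kindKey: "project",
				dataKey: object,
			})
		}
	}

	// Errors ignored for brevity in this sketch.
	b, _ := yaml.Marshal(out)
	fmt.Print(string(b)) // resources: [{kind: project, data: {...}}]
}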
cli.go
ResourcesKey = "resources" ) const ( retryMax = 5 serverSchemaFile = "schema.json" ) // CLI represents API Server's command line interface. type CLI struct { HTTP schemaRoot string log *logrus.Entry } // NewCLIByViper returns new logged in CLI client using Viper configuration. func NewCLIByViper() (*CLI, error) { return NewCLI( &HTTPConfig{ ID: viper.GetString("client.id"), Password: viper.GetString("client.password"), Endpoint: viper.GetString("client.endpoint"), AuthURL: viper.GetString("keystone.authurl"), Scope: keystone.NewScope( viper.GetString("client.domain_id"), viper.GetString("client.domain_name"), viper.GetString("client.project_id"), viper.GetString("client.project_name"), ), Insecure: viper.GetBool("insecure"), }, viper.GetString("client.schema_root"), ) } // NewCLI returns new logged in CLI Client. func NewCLI(c *HTTPConfig, schemaRoot string) (*CLI, error) { client := NewHTTP(c) if err := client.Login(context.Background()); err != nil { return nil, err } return &CLI{ HTTP: *client, schemaRoot: schemaRoot, log: logutil.NewLogger("cli"), }, nil } // ShowResource shows resource with given schemaID and UUID. func (c *CLI) ShowResource(schemaID, uuid string) (string, error) { if schemaID == "" || uuid == "" { return c.showHelp(schemaID, showHelpTemplate) } var response map[string]interface{} _, err := c.Read(context.Background(), urlPath(schemaID, uuid), &response) if err != nil { return "", err } data, ok := response[models.SchemaIDToKind(schemaID)].(map[string]interface{}) if !ok { return "", errors.Errorf( "resource in response is not a JSON object: %v", response[models.SchemaIDToKind(schemaID)], ) } return encodeToYAML(syncListResponse{Resources: []syncResponse{{Data: data, Kind: schemaID}}}) } type syncData struct { Operation string `json:"operation" yaml:"operation"` Kind string `json:"kind" yaml:"kind"` Data interface{} `json:"data" yaml:"data"` } type syncListData struct { Resources []syncData `json:"resources" yaml:"resources"` } type syncResponse = syncData type syncListResponse = syncListData type syncListRequest = syncListData const showHelpTemplate = `Show command possible usages: {% for schema in schemas %}contrail show {{ schema.ID }} $UUID {% endfor %}` // ListParameters contains parameters for list command. type ListParameters struct { Filters string PageLimit int64 PageMarker string Detail bool Count bool Shared bool ExcludeHRefs bool ParentFQName string ParentType string ParentUUIDs string BackrefUUIDs string // TODO(Daniel): handle RefUUIDs ObjectUUIDs string Fields string } // Resources define output format of list command. type Resources = map[string][]map[string]interface{} // ListResources lists resources with given schemaID using filters. 
func (c *CLI) ListResources(schemaID string, lp *ListParameters) (string, error) { if schemaID == "" { return c.showHelp("", listHelpTemplate) } var response map[string]interface{} if _, err := c.ReadWithQuery( context.Background(), pluralPath(schemaID), queryParameters(lp), &response, ); err != nil { return "", err } var r Resources var err error switch { case lp.Count: return encodeToYAML(response) case lp.Detail: r, err = makeOutputResourcesFromDetailedResponse(schemaID, response) default: r, err = makeOutputResources(schemaID, response) } if err != nil { return "", err } return encodeToYAML(r) } const listHelpTemplate = `List command possible usages: {% for schema in schemas %}contrail list {{ schema.ID }} {% endfor %}` func pluralPath(schemaID string) string { return "/" + models.SchemaIDToKind(schemaID) + "s" } func queryParameters(lp *ListParameters) url.Values { values := url.Values{} for k, v := range map[string]string{ services.FiltersKey: lp.Filters, services.PageLimitKey: strconv.FormatInt(lp.PageLimit, 10), services.PageMarkerKey: lp.PageMarker, services.DetailKey: strconv.FormatBool(lp.Detail), services.CountKey: strconv.FormatBool(lp.Count), services.SharedKey: strconv.FormatBool(lp.Shared), services.ExcludeHRefsKey: strconv.FormatBool(lp.ExcludeHRefs), services.ParentFQNameKey: lp.ParentFQName, services.ParentTypeKey: lp.ParentType, services.ParentUUIDsKey: lp.ParentUUIDs, services.BackrefUUIDsKey: lp.BackrefUUIDs, // TODO(Daniel): handle RefUUIDs services.ObjectUUIDsKey: lp.ObjectUUIDs, services.FieldsKey: lp.Fields, } { if !isZeroValue(v) { values.Set(k, v) } } return values } func isZeroValue(value interface{}) bool { return value == "" || value == 0 || value == false } // makeOutputResourcesFromDetailedResponse creates list command output in format compatible with Sync command input // based on API Server detailed response. func makeOutputResourcesFromDetailedResponse(schemaID string, response map[string]interface{}) (Resources, error) { r := Resources{} for _, rawList := range response { list, ok := rawList.([]interface{}) if !ok { return nil, errors.Errorf("detailed response should contain list of resources: %v", rawList) } for _, rawWrappedObject := range list { wrappedObject, ok := rawWrappedObject.(map[string]interface{}) if !ok { return nil, errors.Errorf("detailed response contains invalid data: %v", rawWrappedObject) } for _, object := range wrappedObject { r[ResourcesKey] = append(r[ResourcesKey], map[string]interface{}{ KindKey: schemaID, DataKey: object, }) } } } return r, nil } // makeOutputResources creates list command output in format compatible with Sync command input // based on API Server standard response. func makeOutputResources(schemaID string, response map[string]interface{}) (Resources, error) { r := Resources{} for _, rawList := range response { list, ok := rawList.([]interface{}) if !ok { return nil, errors.Errorf("response should contain list of resources: %v", rawList) } for _, object := range list { r[ResourcesKey] = append(r[ResourcesKey], map[string]interface{}{ KindKey: schemaID, DataKey: object, }) } } return r, nil } // SyncResources synchronizes state of resources specified in given file. 
func (c *CLI) SyncResources(filePath string) (string, error) { var req syncListRequest if err := fileutil.LoadFile(filePath, &req); err != nil { return "", err } for i := range req.Resources { req.Resources[i].Data = fileutil.YAMLtoJSONCompat(req.Resources[i].Data) } var response []syncResponse if _, err := c.Create(context.Background(), "/sync", req, &response); err != nil { return "", err } return encodeToYAML(syncListResponse{Resources: response}) } // SetResourceParameter sets parameter value of resource with given schemaID na UUID. func (c *CLI) SetResourceParameter(schemaID, uuid, yamlString string) (string, error) { if schemaID == "" || uuid == "" { return c.showHelp(schemaID, setHelpTemplate) } var data map[string]interface{} if err := yaml.Unmarshal([]byte(yamlString), &data); err != nil { return "", err } data["uuid"] = uuid _, err := c.Update( context.Background(), urlPath(schemaID, uuid), map[string]interface{}{ models.SchemaIDToKind(schemaID): fileutil.YAMLtoJSONCompat(data), }, nil, ) if err != nil { return "", err } return c.ShowResource(schemaID, uuid) } const setHelpTemplate = `Set command possible usages: {% for schema in schemas %}contrail set {{ schema.ID }} $UUID $YAML {% endfor %}` // DeleteResource deletes resource with given schemaID and UUID. func (c *CLI) DeleteResource(schemaID, uuid string) (string, error) { if schemaID == "" || uuid == "" { return c.showHelp(schemaID, removeHelpTemplate) } response, err := c.EnsureDeleted(context.Background(), urlPath(schemaID, uuid), nil) if err != nil
// YAML key names
const (
	DataKey = "data"
	KindKey = "kind"
random_line_split
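queryParameters, visible in the cli.go chunks above, formats every ListParameters field as a string and then drops the ones isZeroValue considers unset. Because the map it iterates is map[string]string, only the empty-string check in isZeroValue can ever fire: already-formatted values such as "false" or "0" have dynamic type string, so they never compare equal to the untyped constants 0 or false, and flags like detail and count are emitted even when left at their defaults. The sketch below demonstrates that behaviour; the parameter names are invented for illustration and are not the services.*Key constants.

package main

import (
	"fmt"
	"net/url"
	"strconv"
)

// Same shape as cli.go's isZeroValue: with string inputs only the first
// comparison can match, since "false" and "0" are not the bool/int zero values.
func isZeroValue(value interface{}) bool {
	return value == "" || value == 0 || value == false
}

func main() {
	values := url.Values{}
	for k, v := range map[string]string{
		// Invented parameter names, standing in for the services.*Key constants.
		"filters": "",
		"detail":  strconv.FormatBool(false),
		"count":   strconv.FormatBool(false),
	} {
		if !isZeroValue(v) {
			values.Set(k, v)
		}
	}

	// Prints "count=false&detail=false": the empty filter is dropped,
	// but the formatted booleans are kept even though they are defaults.
	fmt.Println(values.Encode())
}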
cli.go
"), Scope: keystone.NewScope( viper.GetString("client.domain_id"), viper.GetString("client.domain_name"), viper.GetString("client.project_id"), viper.GetString("client.project_name"), ), Insecure: viper.GetBool("insecure"), }, viper.GetString("client.schema_root"), ) } // NewCLI returns new logged in CLI Client. func NewCLI(c *HTTPConfig, schemaRoot string) (*CLI, error) { client := NewHTTP(c) if err := client.Login(context.Background()); err != nil { return nil, err } return &CLI{ HTTP: *client, schemaRoot: schemaRoot, log: logutil.NewLogger("cli"), }, nil } // ShowResource shows resource with given schemaID and UUID. func (c *CLI) ShowResource(schemaID, uuid string) (string, error) { if schemaID == "" || uuid == "" { return c.showHelp(schemaID, showHelpTemplate) } var response map[string]interface{} _, err := c.Read(context.Background(), urlPath(schemaID, uuid), &response) if err != nil { return "", err } data, ok := response[models.SchemaIDToKind(schemaID)].(map[string]interface{}) if !ok { return "", errors.Errorf( "resource in response is not a JSON object: %v", response[models.SchemaIDToKind(schemaID)], ) } return encodeToYAML(syncListResponse{Resources: []syncResponse{{Data: data, Kind: schemaID}}}) } type syncData struct { Operation string `json:"operation" yaml:"operation"` Kind string `json:"kind" yaml:"kind"` Data interface{} `json:"data" yaml:"data"` } type syncListData struct { Resources []syncData `json:"resources" yaml:"resources"` } type syncResponse = syncData type syncListResponse = syncListData type syncListRequest = syncListData const showHelpTemplate = `Show command possible usages: {% for schema in schemas %}contrail show {{ schema.ID }} $UUID {% endfor %}` // ListParameters contains parameters for list command. type ListParameters struct { Filters string PageLimit int64 PageMarker string Detail bool Count bool Shared bool ExcludeHRefs bool ParentFQName string ParentType string ParentUUIDs string BackrefUUIDs string // TODO(Daniel): handle RefUUIDs ObjectUUIDs string Fields string } // Resources define output format of list command. type Resources = map[string][]map[string]interface{} // ListResources lists resources with given schemaID using filters. 
func (c *CLI) ListResources(schemaID string, lp *ListParameters) (string, error) { if schemaID == "" { return c.showHelp("", listHelpTemplate) } var response map[string]interface{} if _, err := c.ReadWithQuery( context.Background(), pluralPath(schemaID), queryParameters(lp), &response, ); err != nil { return "", err } var r Resources var err error switch { case lp.Count: return encodeToYAML(response) case lp.Detail: r, err = makeOutputResourcesFromDetailedResponse(schemaID, response) default: r, err = makeOutputResources(schemaID, response) } if err != nil { return "", err } return encodeToYAML(r) } const listHelpTemplate = `List command possible usages: {% for schema in schemas %}contrail list {{ schema.ID }} {% endfor %}` func pluralPath(schemaID string) string { return "/" + models.SchemaIDToKind(schemaID) + "s" } func queryParameters(lp *ListParameters) url.Values { values := url.Values{} for k, v := range map[string]string{ services.FiltersKey: lp.Filters, services.PageLimitKey: strconv.FormatInt(lp.PageLimit, 10), services.PageMarkerKey: lp.PageMarker, services.DetailKey: strconv.FormatBool(lp.Detail), services.CountKey: strconv.FormatBool(lp.Count), services.SharedKey: strconv.FormatBool(lp.Shared), services.ExcludeHRefsKey: strconv.FormatBool(lp.ExcludeHRefs), services.ParentFQNameKey: lp.ParentFQName, services.ParentTypeKey: lp.ParentType, services.ParentUUIDsKey: lp.ParentUUIDs, services.BackrefUUIDsKey: lp.BackrefUUIDs, // TODO(Daniel): handle RefUUIDs services.ObjectUUIDsKey: lp.ObjectUUIDs, services.FieldsKey: lp.Fields, } { if !isZeroValue(v) { values.Set(k, v) } } return values } func isZeroValue(value interface{}) bool { return value == "" || value == 0 || value == false } // makeOutputResourcesFromDetailedResponse creates list command output in format compatible with Sync command input // based on API Server detailed response. func makeOutputResourcesFromDetailedResponse(schemaID string, response map[string]interface{}) (Resources, error) { r := Resources{} for _, rawList := range response { list, ok := rawList.([]interface{}) if !ok { return nil, errors.Errorf("detailed response should contain list of resources: %v", rawList) } for _, rawWrappedObject := range list { wrappedObject, ok := rawWrappedObject.(map[string]interface{}) if !ok { return nil, errors.Errorf("detailed response contains invalid data: %v", rawWrappedObject) } for _, object := range wrappedObject { r[ResourcesKey] = append(r[ResourcesKey], map[string]interface{}{ KindKey: schemaID, DataKey: object, }) } } } return r, nil } // makeOutputResources creates list command output in format compatible with Sync command input // based on API Server standard response. func makeOutputResources(schemaID string, response map[string]interface{}) (Resources, error) { r := Resources{} for _, rawList := range response { list, ok := rawList.([]interface{}) if !ok { return nil, errors.Errorf("response should contain list of resources: %v", rawList) } for _, object := range list { r[ResourcesKey] = append(r[ResourcesKey], map[string]interface{}{ KindKey: schemaID, DataKey: object, }) } } return r, nil } // SyncResources synchronizes state of resources specified in given file. 
func (c *CLI) SyncResources(filePath string) (string, error) { var req syncListRequest if err := fileutil.LoadFile(filePath, &req); err != nil { return "", err } for i := range req.Resources { req.Resources[i].Data = fileutil.YAMLtoJSONCompat(req.Resources[i].Data) } var response []syncResponse if _, err := c.Create(context.Background(), "/sync", req, &response); err != nil { return "", err } return encodeToYAML(syncListResponse{Resources: response}) } // SetResourceParameter sets parameter value of resource with given schemaID na UUID. func (c *CLI) SetResourceParameter(schemaID, uuid, yamlString string) (string, error) { if schemaID == "" || uuid == "" { return c.showHelp(schemaID, setHelpTemplate) } var data map[string]interface{} if err := yaml.Unmarshal([]byte(yamlString), &data); err != nil { return "", err } data["uuid"] = uuid _, err := c.Update( context.Background(), urlPath(schemaID, uuid), map[string]interface{}{ models.SchemaIDToKind(schemaID): fileutil.YAMLtoJSONCompat(data), }, nil, ) if err != nil { return "", err } return c.ShowResource(schemaID, uuid) } const setHelpTemplate = `Set command possible usages: {% for schema in schemas %}contrail set {{ schema.ID }} $UUID $YAML {% endfor %}` // DeleteResource deletes resource with given schemaID and UUID. func (c *CLI) DeleteResource(schemaID, uuid string) (string, error) { if schemaID == "" || uuid == "" { return c.showHelp(schemaID, removeHelpTemplate) } response, err := c.EnsureDeleted(context.Background(), urlPath(schemaID, uuid), nil) if err != nil { return "", err } if response.StatusCode == http.StatusNotFound { c.log.WithField("path", urlPath(schemaID, uuid)).Debug("Not found") } return "", nil } const removeHelpTemplate = `Remove command possible usages: {% for schema in schemas %}contrail rm {{ schema.ID }} $UUID {% endfor %}` type deleteRequest struct { Kind string `json:"kind" yaml:"kind"` Data struct { UUID string `json:"uuid" yaml:"uuid"` } `json:"data" yaml:"data"` } type deleteListRequest struct { List []deleteRequest `json:"resources" yaml:"resources"` } // DeleteResources deletes multiple resources specified in given file. func (c *CLI)
DeleteResources
identifier_name
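DeleteResources, shown in the surrounding cli.go chunks, loads a YAML file into a deleteListRequest and calls DeleteResource for each kind/UUID pair, treating not-found responses as already deleted. Below is a hedged sketch of the file shape it expects and of the decoding step; the struct is re-declared locally for illustration, the UUIDs are made up, and the gopkg.in/yaml.v2 import path is an assumption.

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// Local re-declaration of the request shape used by DeleteResources.
type deleteRequest struct {
	Kind string `yaml:"kind"`
	Data struct {
		UUID string `yaml:"uuid"`
	} `yaml:"data"`
}

type deleteListRequest struct {
	List []deleteRequest `yaml:"resources"`
}

func main() {
	// Made-up resources file, matching the "resources: [{kind, data: {uuid}}]" layout.
	input := []byte(`
resources:
- kind: virtual_network
  data:
    uuid: 11111111-1111-1111-1111-111111111111
- kind: project
  data:
    uuid: 22222222-2222-2222-2222-222222222222
`)

	var req deleteListRequest
	if err := yaml.Unmarshal(input, &req); err != nil {
		panic(err)
	}
	for _, r := range req.List {
		// In cli.go this is where DeleteResource(r.Kind, r.Data.UUID) would be called.
		fmt.Printf("would delete %s %s\n", r.Kind, r.Data.UUID)
	}
}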
Payment-temp.js
return new Promise((resolve) => { const script = document.createElement("script"); script.src = src; script.onload = () => { resolve(true); }; script.onerror = () => { resolve(false); }; document.body.appendChild(script); }); }; const _DEV_ = document.domain === "localhost"; export default function HorizontalLabelPositionBelowStepper() { const navigate = useNavigate(); const buyerId = useParams().buyerId; const [buyerData, setBuyerData] = useState(); const { token } = useAuth(); const [finalMessage, setFinalMessage] = useState(false); const classes = useStyles(); const [activeStep, setActiveStep] = React.useState(0); const steps = getSteps(); const handleNext = () => { setActiveStep((prevActiveStep) => prevActiveStep + 1); }; const handleBack = () => { setActiveStep((prevActiveStep) => prevActiveStep - 1); }; const handleReset = () => { setActiveStep(0); }; useEffect(() => { const getbuyerData = async () => { const response = await axios.get(`http://localhost:5000/seller/buyer/${buyerId}`, { headers: { "x-access-token": token }, }); console.log(response); const data = await response.data; console.log(data); setBuyerData(response.data); }; getbuyerData(); }, [token, buyerId]); ////////////////////////////////////////////////////////////////////////// const displayRazorPay = async () => { console.log(token); const res = await loadScript("https://checkout.razorpay.com/v1/checkout.js"); if (!res) { alert("razorpay sdk failed to load. are u online"); return; } // const data = await fetch("http://localhost:5000/buyer/checkout", { // method: "POST", // }).then((t) => t.json()); // const data = await axios.post(`http://localhost:5000/buyer/checkout`, { // headers: { "x-access-token": token }, // }); const data = await fetch(`http://localhost:5000/buyer/checkout`, { method: "POST", headers: { "x-access-token": token, }, }).then((t) => t.json()); console.log(data); var options = { key: _DEV_ ? "rzp_test_5AmHwMVymTPMzT" : "PRODUCTION_KEY", // Enter the Key ID generated from the Dashboard amount: data.amount.toString(), // Amount is in currency subunits. Default currency is INR. Hence, 50000 refers to 50000 paise currency: data.currency, name: "Payment", description: "Test Transaction", image: "https://example.com/your_logo", order_id: data.id, //This is a sample Order ID. 
Pass the `id` obtained in the response of Step 1 handler: function async(response) { alert(response.razorpay_payment_id); alert(response.razorpay_order_id); alert(response.razorpay_signature); const sendVerify = async (response) => { console.log(response); const details = { razorpay_order_id: response.razorpay_order_id, razorpay_payment_id: response.razorpay_payment_id, razorpay_signature: response.razorpay_signature, }; const res = await axios .post(`http://localhost:5000/buyer/payment/verify`, details, { headers: { "x-access-token": token, }, }) .then(setFinalMessage(true)); console.log(res); }; sendVerify(response); }, prefill: { name: "Ankur", email: "[email protected]", contact: "9999999999", }, }; var paymentObject = new window.Razorpay(options); // document.getElementById("rzp-button1").onclick = function (e) { // rzp1.open(); // e.preventDefault(); // }; paymentObject.open(); paymentObject.on("payment.failed", function (response) { alert(response.error.code); alert(response.error.description); alert(response.error.source); alert(response.error.step); alert(response.error.reason); alert(response.error.metadata.order_id); alert(response.error.metadata.payment_id); }); }; ///////////////////////////////////////////////////////////////////// const CODhandler = async () => { const response = await axios.get(`http://localhost:5000/buyer/COD`, { headers: { "x-access-token": token, }, }); // .then(alert("checkout complete please close this window")); setFinalMessage(true); console.log(response); }; return ( <div className={classes.root}> {/* <button onClick={()=>{console.log(buyerData);}} >vlivk</button> */} <Navigation /> <Stepper activeStep={activeStep} alternativeLabel> {steps.map((label) => ( <Step key={label}> <StepLabel>{label}</StepLabel> </Step> ))} </Stepper> <div> {activeStep === steps.length ? ( <div> <Typography className={classes.instructions}>All steps completed</Typography> <Button onClick={handleReset}>Reset</Button> </div> ) : ( <div> <Typography className={classes.instructions}> {getStepContent(activeStep, buyerData)} </Typography> <div className="containerOverride"> {/* <Button disabled={activeStep === 0} onClick={handleBack} className={classes.backButton} > Back </Button> */} {/* <Button style={{ position: "fixed", width: "45%", padding: "1rem", marginTop: "1rem", bottom: 0, left: 0, display: "flex", alignItems: "center", justifyContent: "center", marginLeft: 370, height: 50, color: "white", background: "aqua", }} variant="contained" color="" onClick={handleNext} > {activeStep === steps.length - 1 ? "Finish" : "Deliver Here"} </Button> */} <button // style={{ // // position: "fixed", // border: "none", // width: "20%", // padding: "1rem", // marginTop: "1rem", // display: "flex", // alignItems: "center", // justifyContent: "center", // marginLeft: 370, // height: 50, // color: "grey", // background: "aqua", // }} onClick={displayRazorPay} > click for online payment </button> <button // style={{ // border: "none", // width: "20%", // padding: "1rem", // marginTop: "1rem", // display: "flex", // alignItems: "center", // justifyContent: "center", // marginLeft: 370, // height: 50, // color: "grey", // background: "aqua", // }} onClick={CODhandler} > click for cash on delivery payment </button> </div> </div> )} </div> {finalMessage && ( <Link to={`/home`}>Payment Successful, click here to continue shopping </Link> )}
function getStepContent(stepIndex, buyerData) {
  switch (stepIndex) {
    case 0:
      return (
        <div>
          {buyerData && (
            <div style={{ background: "#ecf0f1", margin: "auto", width: 630 }}>
              <font color="red" style={{ color: "red", fontWeight: "bold" }}>
                <b>Delivery Address</b>
              </font>
              {"\n\n"}
              <br></br>
              <div></div>
              <div></div>
              Name:{buyerData.fullname}
              <br></br>
              MobileNo:{buyerData.phone}
              <br></br>
              EmailID:{buyerData.email}
              <br></br>
              Address:{buyerData.shopAddress}
            </div>
          )}
        </div>
      );
    case 1:
      return (
        <div style={{ marginLeft: 150, marginTop: 20, width: 630 }}>
          <h3>Select Payment Method</h3>
          <div style={{ background: "#ecf0f1", marginTop: 20, width: 630 }}>Cart</div>
          <div style={{ marginTop: 20 }}>Credit Debit & ATM Cards</div>
          <div style={{ marginTop: 20 }}>Sodexco Meal Pass</div>
          <div style={{ background: "#ecf0f1", marginTop
</div> ); }
random_line_split