__all__ = ['ShapSummaryComponent',
'ShapDependenceComponent',
'ShapSummaryDependenceConnector',
'InteractionSummaryComponent',
'InteractionDependenceComponent',
'InteractionSummaryDependenceConnector',
'ShapContributionsTableComponent',
'ShapContributionsTableComponentPerso',
'ShapContributionsGraphComponent']
import dash
import dash_core_components as dcc
import dash_bootstrap_components as dbc
import dash_html_components as html
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
from ..dashboard_methods import *
class ShapSummaryComponent(ExplainerComponent):
def __init__(self, explainer, title='Shap Summary', name=None,
subtitle="Ordering features by shap value",
hide_title=False, hide_subtitle=False, hide_depth=False,
hide_type=False, hide_cats=False, hide_index=False, hide_selector=False,
pos_label=None, depth=None,
summary_type="aggregate", cats=True, index=None,
description=None, **kwargs):
"""Shows shap summary component
Args:
explainer (Explainer): explainer object constructed with either
ClassifierExplainer() or RegressionExplainer()
            title (str, optional): Title of tab or page. Defaults to
                        "Shap Summary".
name (str, optional): unique name to add to Component elements.
If None then random uuid is generated to make sure
it's unique. Defaults to None.
subtitle (str): subtitle
hide_title (bool, optional): hide the title. Defaults to False.
hide_subtitle (bool, optional): Hide subtitle. Defaults to False.
hide_depth (bool, optional): hide the depth toggle.
Defaults to False.
hide_type (bool, optional): hide the summary type toggle
(aggregated, detailed). Defaults to False.
            hide_cats (bool, optional): hide the group cats toggle. Defaults to False.
            hide_index (bool, optional): hide the index selector. Defaults to False.
            hide_selector (bool, optional): hide pos label selector. Defaults to False.
pos_label ({int, str}, optional): initial pos label.
Defaults to explainer.pos_label
depth (int, optional): initial number of features to show. Defaults to None.
            summary_type (str, {'aggregate', 'detailed'}, optional): type of
summary graph to show. Defaults to "aggregate".
            cats (bool, optional): group cats. Defaults to True.
            index (str, optional): initial index to highlight. Defaults to None.
description (str, optional): Tooltip to display when hover over
component title. When None default text is shown.
"""
super().__init__(explainer, title, name)
if self.explainer.cats is None or not self.explainer.cats:
self.hide_cats = True
if self.depth is not None:
self.depth = min(self.depth, self.explainer.n_features(cats))
self.index_name = 'shap-summary-index-'+self.name
self.selector = PosLabelSelector(explainer, name=self.name, pos_label=pos_label)
if self.description is None: self.description = """
The shap summary summarizes the shap values per feature.
        You can either select an aggregate display that shows the mean absolute shap value
        per feature, or get a more detailed look at the spread of shap values per
        feature and how they correlate with the feature value (red is high).
"""
self.register_dependencies('shap_values', 'shap_values_cats')
def layout(self):
return html.Div([
make_hideable(
dbc.CardHeader([
html.Div([
html.H3(self.title, id='shap-summary-title-'+self.name),
make_hideable(html.H6(self.subtitle, className='card-subtitle'), hide=self.hide_subtitle),
dbc.Tooltip(self.description, target='shap-summary-title-'+self.name),
]),
]), hide=self.hide_title),
dbc.CardBody([
dbc.Row([
make_hideable(
dbc.Col([
dbc.Form([
dbc.FormGroup(
[
dbc.Label("Summary Type: ", id='shap-summary-type-label-'+self.name,style=dict(marginLeft= 20,marginRight= 20)),
dbc.Tooltip("Display mean absolute SHAP value per feature (aggregate)"
" or display every single shap value per feature (detailed)",
target='shap-summary-type-label-'+self.name),
dbc.Select(
options=[
{"label": "Aggregate", "value": "aggregate"},
{"label": "Detailed", "value": "detailed"},
],
value=self.summary_type,
id="shap-summary-type-"+self.name,
)
]
)
                        ], inline=True)]), self.hide_type),
make_hideable(
dbc.Col([
dbc.Form([dbc.FormGroup([
dbc.Label("Depth:", id='shap-summary-depth-label-'+self.name,style=dict(marginRight= 20)),
dbc.Tooltip("Number of features to display",
target='shap-summary-depth-label-'+self.name),
dbc.Select(id='shap-summary-depth-'+self.name,
options=[{'label': str(i+1), 'value': i+1} for i in
range(self.explainer.n_features(self.cats))],
value=self.depth),
html.Br(),
                        ])], inline=True)]), self.hide_depth),
make_hideable(
dbc.Col([
dbc.FormGroup([
dbc.Label("Grouping:", id='shap-summary-group-cats-label-'+self.name),
dbc.Tooltip("Group onehot encoded categorical variables together",
target='shap-summary-group-cats-label-'+self.name),
dbc.Checklist(
options=[{"label": "Group cats", "value": True}],
value=[True] if self.cats else [],
id='shap-summary-group-cats-'+self.name,
inline=True,
switch=True,
),
]),
]), self.hide_cats),
make_hideable(
dbc.Col([
html.Div([dbc.Form([dbc.FormGroup([
dbc.Label(f"{self.explainer.index_name}:", id='shap-summary-index-label-'+self.name),
dbc.Tooltip(f"Select {self.explainer.index_name} to highlight in plot. "
"You can also select by clicking on a scatter point in the graph.",
target='shap-summary-index-label-'+self.name),
dcc.Dropdown(id='shap-summary-index-'+self.name,
options = [{'label': str(idx), 'value':idx}
for idx in self.explainer.idxs],
                                 value=self.index)])], inline=True)
                    ], id='shap-summary-index-col-'+self.name, style=dict(display="none")),
                ]), hide=True),
make_hideable(
dbc.Col([self.selector.layout()
], width=2), hide=self.hide_selector)
], form=True),
dcc.Loading(id="loading-dependence-shap-summary-"+self.name,
children=[dcc.Graph(id="shap-summary-graph-"+self.name,
config=dict(modeBarButtons=[['toImage']], displaylogo=False))]),
]),
])
def component_callbacks(self, app):
@app.callback(
Output('shap-summary-index-'+self.name, 'value'),
[Input('shap-summary-graph-'+self.name, 'clickData')])
def display_scatter_click_data(clickdata):
if clickdata is not None and clickdata['points'][0] is not None:
if isinstance(clickdata['points'][0]['y'], float): # detailed
index = clickdata['points'][0]['text'].split('=')[1].split('<br>')[0]
return index
raise PreventUpdate
@app.callback(
[Output('shap-summary-graph-'+self.name, 'figure'),
Output('shap-summary-depth-'+self.name, 'options'),
Output('shap-summary-index-col-'+self.name, 'style')],
[Input('shap-summary-type-'+self.name, 'value'),
Input('shap-summary-group-cats-'+self.name, 'value'),
Input('shap-summary-depth-'+self.name, 'value'),
Input('shap-summary-index-'+self.name, 'value'),
Input('pos-label-'+self.name, 'value')])
def update_shap_summary_graph(summary_type, cats, depth, index, pos_label):
cats = bool(cats)
depth = None if depth is None else int(depth)
if summary_type == 'aggregate':
plot = self.explainer.plot_importances(
kind='shap', topx=depth, cats=cats, pos_label=pos_label)
elif summary_type == 'detailed':
plot = self.explainer.plot_shap_summary(
topx=depth, cats=cats, pos_label=pos_label, index=index)
ctx = dash.callback_context
trigger = ctx.triggered[0]['prop_id'].split('.')[0]
if trigger == 'shap-summary-group-cats-'+self.name:
depth_options = [{'label': str(i+1), 'value': i+1}
for i in range(self.explainer.n_features(cats))]
return (plot, depth_options, dash.no_update)
elif trigger == 'shap-summary-type-'+self.name:
if summary_type == 'aggregate':
return (plot, dash.no_update, dict(display="none"))
elif summary_type == 'detailed':
return (plot, dash.no_update, {})
else:
return (plot, dash.no_update, dash.no_update)
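

# --------------------------------------------------------------------------
# Usage sketch (not part of the original module): one way to mount a
# ShapSummaryComponent in a running dashboard. It assumes the
# explainerdashboard-style ClassifierExplainer and ExplainerDashboard classes
# that this file mirrors; `model`, `X_test` and `y_test` are hypothetical
# placeholders for a fitted model and a holdout set.
def _example_shap_summary(model, X_test, y_test):
    from explainerdashboard import ClassifierExplainer, ExplainerDashboard
    explainer = ClassifierExplainer(model, X_test, y_test)
    # start with the ten most important features in detailed (beeswarm) mode
    component = ShapSummaryComponent(explainer, depth=10,
                                     summary_type="detailed")
    ExplainerDashboard(explainer, component).run()
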
class ShapDependenceComponent(ExplainerComponent):
def __init__(self, explainer, title='Shap Dependence', name=None,
subtitle="Relationship between feature value and SHAP value",
hide_title=False, hide_subtitle=False, hide_cats=False, hide_col=False,
hide_color_col=False, hide_index=False,
hide_selector=False,
pos_label=None, cats=True, col=None,
color_col=None, index=None, description=None, **kwargs):
"""Show shap dependence graph
Args:
explainer (Explainer): explainer object constructed with either
ClassifierExplainer() or RegressionExplainer()
title (str, optional): Title of tab or page. Defaults to
"Shap Dependence".
name (str, optional): unique name to add to Component elements.
If None then random uuid is generated to make sure
it's unique. Defaults to None.
subtitle (str): subtitle
hide_title (bool, optional): hide component title. Defaults to False.
hide_subtitle (bool, optional): Hide subtitle. Defaults to False.
hide_cats (bool, optional): hide group cats toggle. Defaults to False.
hide_col (bool, optional): hide feature selector. Defaults to False.
            hide_color_col (bool, optional): hide color feature selector. Defaults to False.
            hide_index (bool, optional): hide index selector. Defaults to False.
hide_selector (bool, optional): hide pos label selector. Defaults to False.
pos_label ({int, str}, optional): initial pos label.
Defaults to explainer.pos_label
cats (bool, optional): group cats. Defaults to True.
col (str, optional): Feature to display. Defaults to None.
color_col (str, optional): Color plot by values of this Feature.
Defaults to None.
index (int, optional): Highlight a particular index. Defaults to None.
description (str, optional): Tooltip to display when hover over
component title. When None default text is shown.
"""
super().__init__(explainer, title, name)
if self.col is None:
self.col = self.explainer.columns_ranked_by_shap(self.cats)[0]
if self.color_col is None:
self.color_col = self.explainer.shap_top_interactions(self.col, cats=cats)[1]
self.selector = PosLabelSelector(explainer, name=self.name, pos_label=pos_label)
self.index_name = 'shap-dependence-index-'+self.name
if self.description is None: self.description = """
This plot shows the relation between feature values and shap values.
This allows you to investigate the general relationship between feature
value and impact on the prediction, i.e. "older passengers were predicted
        to be less likely to survive the titanic". You can check whether the model
uses features in line with your intuitions, or use the plots to learn
about the relationships that the model has learned between the input features
and the predicted outcome.
"""
self.register_dependencies('shap_values', 'shap_values_cats')
def layout(self):
return dbc.Card([
make_hideable(
dbc.CardHeader([
html.Div([
html.H3(self.title, id='shap-dependence-title-'+self.name),
make_hideable(html.H6(self.subtitle, className='card-subtitle'), hide=self.hide_subtitle),
dbc.Tooltip(self.description, target='shap-dependence-title-'+self.name),
]),
]), hide=self.hide_title),
dbc.CardBody([
dbc.Row([
make_hideable(
dbc.Col([self.selector.layout()
], width=2), hide=self.hide_selector),
]),
dbc.Row([
make_hideable(
dbc.Col([
dbc.FormGroup([
dbc.Label("Grouping:", id='shap-dependence-group-cats-label-'+self.name),
dbc.Tooltip("Group onehot encoded categorical variables together",
target='shap-dependence-group-cats-label-'+self.name),
dbc.Checklist(
options=[{"label": "Group cats", "value": True}],
value=[True] if self.cats else [],
id='shap-dependence-group-cats-'+self.name,
inline=True,
switch=True,
),
]),
], md=2), self.hide_cats),
make_hideable(
dbc.Col([
dbc.Label('Feature:', id='shap-dependence-col-label-'+self.name),
dbc.Tooltip("Select feature to display shap dependence for",
target='shap-dependence-col-label-'+self.name),
dbc.Select(id='shap-dependence-col-'+self.name,
options=[{'label': col, 'value':col}
for col in self.explainer.columns_ranked_by_shap(self.cats)],
value=self.col)
], md=3), self.hide_col),
make_hideable(dbc.Col([
html.Label('Color feature:', id='shap-dependence-color-col-label-'+self.name),
dbc.Tooltip("Select feature to color the scatter markers by. This "
"allows you to see interactions between various features in the graph.",
target='shap-dependence-color-col-label-'+self.name),
dbc.Select(id='shap-dependence-color-col-'+self.name,
options=[{'label': col, 'value':col}
for col in self.explainer.columns_ranked_by_shap(self.cats)],
value=self.color_col),
], md=3), self.hide_color_col),
make_hideable(
dbc.Col([
dbc.Label(f"{self.explainer.index_name}:", id='shap-dependence-index-label-'+self.name),
dbc.Tooltip(f"Select {self.explainer.index_name} to highlight in the plot."
"You can also select by clicking on a scatter marker in the accompanying"
" shap summary plot (detailed).",
target='shap-dependence-index-label-'+self.name),
dcc.Dropdown(id='shap-dependence-index-'+self.name,
options = [{'label': str(idx), 'value':idx}
for idx in self.explainer.idxs],
value=self.index)
], md=4), hide=self.hide_index),
], form=True),
dcc.Loading(id="loading-dependence-graph-"+self.name,
children=[
dcc.Graph(id='shap-dependence-graph-'+self.name,
config=dict(modeBarButtons=[['toImage']], displaylogo=False))]),
]),
])
def component_callbacks(self, app):
@app.callback(
[Output('shap-dependence-color-col-'+self.name, 'options'),
Output('shap-dependence-color-col-'+self.name, 'value')],
[Input('shap-dependence-col-'+self.name, 'value')],
[State('shap-dependence-group-cats-'+self.name, 'value'),
State('pos-label-'+self.name, 'value')])
def set_color_col_dropdown(col, cats, pos_label):
sorted_interact_cols = self.explainer.shap_top_interactions(
col, cats=bool(cats), pos_label=pos_label)
options = [{'label': col, 'value':col}
for col in sorted_interact_cols]
value = sorted_interact_cols[1]
return (options, value)
@app.callback(
Output('shap-dependence-graph-'+self.name, 'figure'),
[Input('shap-dependence-color-col-'+self.name, 'value'),
Input('shap-dependence-index-'+self.name, 'value'),
Input('pos-label-'+self.name, 'value')],
[State('shap-dependence-col-'+self.name, 'value')])
def update_dependence_graph(color_col, index, pos_label, col):
if col is not None:
return self.explainer.plot_shap_dependence(
col, color_col, highlight_index=index, pos_label=pos_label)
raise PreventUpdate
@app.callback(
Output('shap-dependence-col-'+self.name, 'options'),
[Input('shap-dependence-group-cats-'+self.name, 'value')],
[State('shap-dependence-col-'+self.name, 'value')])
def update_dependence_shap_scatter_graph(cats, old_col):
options = [{'label': col, 'value': col}
for col in self.explainer.columns_ranked_by_shap(bool(cats))]
return options
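

# Usage sketch (assumption): a standalone dependence plot for one feature,
# colored by another to surface interactions. The feature names "Age" and
# "Sex" are hypothetical and depend on your dataset.
def _example_shap_dependence(explainer):
    return ShapDependenceComponent(explainer, col="Age", color_col="Sex",
                                   hide_cats=True)
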
class ShapSummaryDependenceConnector(ExplainerComponent):
def __init__(self, shap_summary_component, shap_dependence_component):
"""Connects a ShapSummaryComponent with a ShapDependence Component:
- When group cats in ShapSummary, then group cats in ShapDependence
- When clicking on feature in ShapSummary, then select that feature in ShapDependence
Args:
shap_summary_component (ShapSummaryComponent): ShapSummaryComponent
shap_dependence_component (ShapDependenceComponent): ShapDependenceComponent
"""
self.sum_name = shap_summary_component.name
self.dep_name = shap_dependence_component.name
def component_callbacks(self, app):
@app.callback(
Output('shap-dependence-group-cats-'+self.dep_name, 'value'),
[Input('shap-summary-group-cats-'+self.sum_name, 'value')])
def update_dependence_shap_scatter_graph(cats):
return cats
@app.callback(
[Output('shap-dependence-index-'+self.dep_name, 'value'),
Output('shap-dependence-col-'+self.dep_name, 'value')],
[Input('shap-summary-graph-'+self.sum_name, 'clickData')])
def display_scatter_click_data(clickdata):
if clickdata is not None and clickdata['points'][0] is not None:
if isinstance(clickdata['points'][0]['y'], float): # detailed
index = clickdata['points'][0]['text'].split('=')[1].split('<br>')[0]
col = clickdata['points'][0]['text'].split('=')[1].split('<br>')[1]
return (index, col)
elif isinstance(clickdata['points'][0]['y'], str): # aggregate
# in aggregate clickdata returns col name -> type==str
col = clickdata['points'][0]['y'].split(' ')[1]
return (dash.no_update, col)
raise PreventUpdate
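

# Wiring sketch (assumption): the connector has no layout of its own, it only
# registers callbacks, so it is typically bundled with the two components it
# connects inside a small composite component. The register_components()
# helper follows the explainerdashboard base-class API this file is built on.
class _ExampleSummaryDependencePage(ExplainerComponent):
    def __init__(self, explainer):
        super().__init__(explainer, title="Shap summary & dependence")
        self.summary = ShapSummaryComponent(explainer)
        self.dependence = ShapDependenceComponent(explainer, hide_selector=True)
        self.connector = ShapSummaryDependenceConnector(self.summary, self.dependence)
        self.register_components(self.summary, self.dependence, self.connector)

    def layout(self):
        # clicking a marker in the (detailed) summary plot now selects that
        # feature and index in the dependence plot
        return html.Div([self.summary.layout(), self.dependence.layout()])
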
class InteractionSummaryComponent(ExplainerComponent):
def __init__(self, explainer, title="Interactions Summary", name=None,
subtitle="Ordering features by shap interaction value",
hide_title=False, hide_subtitle=False, hide_col=False, hide_depth=False,
hide_type=False, hide_cats=False, hide_index=False, hide_selector=False,
pos_label=None, col=None, depth=None,
summary_type="aggregate", cats=True, index=None, description=None,
**kwargs):
"""Show SHAP Interaciton values summary component
Args:
explainer (Explainer): explainer object constructed with either
ClassifierExplainer() or RegressionExplainer()
title (str, optional): Title of tab or page. Defaults to
"Interactions Summary".
name (str, optional): unique name to add to Component elements.
If None then random uuid is generated to make sure
it's unique. Defaults to None.
subtitle (str): subtitle
hide_title (bool, optional): hide the component title. Defaults to False.
hide_subtitle (bool, optional): Hide subtitle. Defaults to False.
hide_col (bool, optional): Hide the feature selector. Defaults to False.
hide_depth (bool, optional): Hide depth toggle. Defaults to False.
hide_type (bool, optional): Hide summary type toggle. Defaults to False.
hide_cats (bool, optional): Hide group cats toggle. Defaults to False.
            hide_index (bool, optional): Hide the index selector. Defaults to False.
hide_selector (bool, optional): hide pos label selector. Defaults to False.
pos_label ({int, str}, optional): initial pos label.
Defaults to explainer.pos_label
col (str, optional): Feature to show interaction summary for. Defaults to None.
depth (int, optional): Number of interaction features to display. Defaults to None.
summary_type (str, {'aggregate', 'detailed'}, optional): type of summary graph to display. Defaults to "aggregate".
cats (bool, optional): Group categorical features. Defaults to True.
index (str): Default index. Defaults to None.
description (str, optional): Tooltip to display when hover over
component title. When None default text is shown.
"""
super().__init__(explainer, title, name)
if self.col is None:
self.col = self.explainer.columns_ranked_by_shap(self.cats)[0]
if self.depth is not None:
self.depth = min(self.depth, self.explainer.n_features(self.cats)-1)
if not self.explainer.cats:
self.hide_cats = True
self.index_name = 'interaction-summary-index-'+self.name
self.selector = PosLabelSelector(explainer, name=self.name, pos_label=pos_label)
if self.description is None: self.description = """
Shows shap interaction values. Each shap value can be decomposed into a direct
effect and indirect effects. The indirect effects are due to interactions
        of the feature with other features. For example, the fact that you know
        the gender of a passenger on the titanic will have a direct effect (women
        more likely to survive than men), but may also have indirect effects through
for example passenger class (first class women more likely to survive than
average woman, third class women less likely).
"""
self.register_dependencies("shap_interaction_values", "shap_interaction_values_cats")
def layout(self):
return dbc.Card([
make_hideable(
dbc.CardHeader([
html.Div([
html.H3(self.title, id='interaction-summary-title-'+self.name),
make_hideable(html.H6(self.subtitle, className='card-subtitle'), hide=self.hide_subtitle),
dbc.Tooltip(self.description, target='interaction-summary-title-'+self.name),
]),
]), hide=self.hide_title),
dbc.CardBody([
dbc.Row([
make_hideable(
dbc.Col([
dbc.Label("Feature", id='interaction-summary-col-label-'+self.name),
dbc.Tooltip("Feature to select interactions effects for",
target='interaction-summary-col-label-'+self.name),
dbc.Select(id='interaction-summary-col-'+self.name,
options=[{'label': col, 'value': col}
for col in self.explainer.columns_ranked_by_shap(self.cats)],
value=self.col),
], md=2), self.hide_col),
make_hideable(
dbc.Col([
dbc.Label("Depth:", id='interaction-summary-depth-label-'+self.name),
dbc.Tooltip("Number of interaction features to display",
target='interaction-summary-depth-label-'+self.name),
dbc.Select(id='interaction-summary-depth-'+self.name,
options = [{'label': str(i+1), 'value':i+1}
for i in range(self.explainer.n_features(self.cats)-1)],
value=self.depth)
], md=2), self.hide_depth),
make_hideable(
dbc.Col([
dbc.FormGroup(
[
dbc.Label("Summary Type", id='interaction-summary-type-label-'+self.name),
dbc.Tooltip("Display mean absolute SHAP value per feature (aggregate)"
" or display every single shap value per feature (detailed)",
target='interaction-summary-type-label-'+self.name),
dbc.Select(
options=[
{"label": "Aggregate", "value": "aggregate"},
{"label": "Detailed", "value": "detailed"},
],
value=self.summary_type,
id='interaction-summary-type-'+self.name,
),
]
)
], md=3), self.hide_type),
make_hideable(
dbc.Col([
dbc.FormGroup([
dbc.Label("Grouping:", id='interaction-summary-group-cats-label-'+self.name),
dbc.Tooltip("Group onehot encoded categorical variables together",
target='interaction-summary-group-cats-label-'+self.name),
dbc.Checklist(
options=[{"label": "Group cats", "value": True}],
value=[True] if self.cats else [],
id='interaction-summary-group-cats-'+self.name,
inline=True,
switch=True,
),
]),
],md=2), self.hide_cats),
make_hideable(
dbc.Col([
html.Div([
dbc.Label(f"{self.explainer.index_name}:", id='interaction-summary-index-label-'+self.name),
dbc.Tooltip(f"Select {self.explainer.index_name} to highlight in plot. "
"You can also select by clicking on a scatter point in the graph.",
target='interaction-summary-index-label-'+self.name),
dcc.Dropdown(id='interaction-summary-index-'+self.name,
options = [{'label': str(idx), 'value':idx}
for idx in self.explainer.idxs],
value=self.index),
], id='interaction-summary-index-col-'+self.name, style=dict(display="none")),
], md=3), hide=self.hide_index),
make_hideable(
dbc.Col([self.selector.layout()
], width=2), hide=self.hide_selector),
], form=True),
dbc.Row([
dbc.Col([
dcc.Loading(id='loading-interaction-summary-graph-'+self.name,
children=[dcc.Graph(id='interaction-summary-graph-'+self.name,
config=dict(modeBarButtons=[['toImage']], displaylogo=False))])
])
]),
]),
])
def component_callbacks(self, app):
@app.callback(
Output('interaction-summary-index-'+self.name, 'value'),
[Input('interaction-summary-graph-'+self.name, 'clickData')])
def display_scatter_click_data(clickdata):
if clickdata is not None and clickdata['points'][0] is not None:
if isinstance(clickdata['points'][0]['y'], float): # detailed
index = clickdata['points'][0]['text'].split('=')[1].split('<br>')[0]
return index
raise PreventUpdate
@app.callback(
[Output('interaction-summary-depth-'+self.name, 'options'),
Output('interaction-summary-col-'+self.name, 'options')],
[Input('interaction-summary-group-cats-'+self.name, 'value'),
Input('pos-label-'+self.name, 'value')])
def update_interaction_scatter_graph(cats, pos_label):
depth_options = [{'label': str(i+1), 'value': i+1}
for i in range(self.explainer.n_features(bool(cats)))]
new_cols = self.explainer.columns_ranked_by_shap(bool(cats), pos_label=pos_label)
new_col_options = [{'label': col, 'value':col} for col in new_cols]
return depth_options, new_col_options
@app.callback(
[Output('interaction-summary-graph-'+self.name, 'figure'),
Output('interaction-summary-index-col-'+self.name, 'style')],
[Input('interaction-summary-col-'+self.name, 'value'),
Input('interaction-summary-depth-'+self.name, 'value'),
Input('interaction-summary-type-'+self.name, 'value'),
Input('interaction-summary-index-'+self.name, 'value'),
Input('pos-label-'+self.name, 'value'),
Input('interaction-summary-group-cats-'+self.name, 'value')])
def update_interaction_scatter_graph(col, depth, summary_type, index, pos_label, cats):
if col is not None:
depth = None if depth is None else int(depth)
if summary_type=='aggregate':
plot = self.explainer.plot_interactions(
col, topx=depth, cats=bool(cats), pos_label=pos_label)
return plot, dict(display="none")
elif summary_type=='detailed':
plot = self.explainer.plot_shap_interaction_summary(
col, topx=depth, cats=bool(cats), pos_label=pos_label, index=index)
return plot, {}
raise PreventUpdate
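

# Usage sketch (assumption): an aggregate interaction summary for one feature.
# Shap interaction values can be expensive to compute, so expect the first
# render to take a while on larger datasets. "Age" is a hypothetical feature.
def _example_interaction_summary(explainer):
    return InteractionSummaryComponent(explainer, col="Age", depth=5,
                                       summary_type="aggregate")
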
class InteractionDependenceComponent(ExplainerComponent):
def __init__(self, explainer, title="Interaction Dependence", name=None,
subtitle="Relation between feature value and shap interaction value",
hide_title=False, hide_subtitle=False, hide_cats=False, hide_col=False,
hide_interact_col=False, hide_index=False,
hide_selector=False, hide_top=False, hide_bottom=False,
pos_label=None, cats=True, col=None, interact_col=None,
description=None, index=None, **kwargs):
"""Interaction Dependence Component.
Shows two graphs:
top graph: col vs interact_col
bottom graph: interact_col vs col
Args:
explainer (Explainer): explainer object constructed with either
ClassifierExplainer() or RegressionExplainer()
            title (str, optional): Title of tab or page. Defaults to
                        "Interaction Dependence".
name (str, optional): unique name to add to Component elements.
If None then random uuid is generated to make sure
it's unique. Defaults to None.
subtitle (str): subtitle
hide_title (bool, optional): Hide component title. Defaults to False.
hide_subtitle (bool, optional): Hide subtitle. Defaults to False.
hide_cats (bool, optional): Hide group cats toggle. Defaults to False.
hide_col (bool, optional): Hide feature selector. Defaults to False.
hide_interact_col (bool, optional): Hide interaction
feature selector. Defaults to False.
            hide_index (bool, optional): Hide the highlight index selector.
                Defaults to False.
hide_selector (bool, optional): hide pos label selector.
Defaults to False.
hide_top (bool, optional): Hide the top interaction graph
(col vs interact_col). Defaults to False.
hide_bottom (bool, optional): hide the bottom interaction graph
(interact_col vs col). Defaults to False.
pos_label ({int, str}, optional): initial pos label.
Defaults to explainer.pos_label
cats (bool, optional): group categorical features. Defaults to True.
col (str, optional): Feature to find interactions for. Defaults to None.
interact_col (str, optional): Feature to interact with. Defaults to None.
            index (str, optional): Index row to highlight. Defaults to None.
description (str, optional): Tooltip to display when hover over
component title. When None default text is shown.
"""
super().__init__(explainer, title, name)
if self.col is None:
self.col = explainer.columns_ranked_by_shap(cats)[0]
if self.interact_col is None:
self.interact_col = explainer.shap_top_interactions(self.col, cats=cats)[1]
self.selector = PosLabelSelector(explainer, name=self.name, pos_label=pos_label)
if self.description is None: self.description = """
This plot shows the relation between feature values and shap interaction values.
This allows you to investigate interactions between features in determining
the prediction of the model.
"""
self.register_dependencies("shap_interaction_values", "shap_interaction_values_cats")
def layout(self):
return dbc.Card([
make_hideable(
dbc.CardHeader([
html.Div([
html.H3(self.title, id='interaction-dependence-title-'+self.name),
make_hideable(html.H6(self.subtitle, className='card-subtitle'), hide=self.hide_subtitle),
dbc.Tooltip(self.description, target='interaction-dependence-title-'+self.name),
]),
]), hide=self.hide_title),
dbc.CardBody([
dbc.Row([
make_hideable(
dbc.Col([self.selector.layout()
], width=2), hide=self.hide_selector),
]),
dbc.Row([
make_hideable(
dbc.Col([
dbc.FormGroup([
dbc.Label("Grouping:", id='interaction-dependence-group-cats-label-'+self.name),
dbc.Tooltip("Group onehot encoded categorical variables together",
target='interaction-dependence-group-cats-label-'+self.name),
dbc.Checklist(
options=[{"label": "Group cats", "value": True}],
value=[True] if self.cats else [],
id='interaction-dependence-group-cats-'+self.name,
inline=True,
switch=True,
),
]),
], md=2), hide=self.hide_cats),
make_hideable(
dbc.Col([
dbc.Label('Feature:', id='interaction-dependence-col-label-'+self.name),
dbc.Tooltip("Select feature to display shap interactions for",
target='interaction-dependence-col-label-'+self.name),
dbc.Select(id='interaction-dependence-col-'+self.name,
options=[{'label': col, 'value':col}
for col in self.explainer.columns_ranked_by_shap(self.cats)],
value=self.col
),
], md=3), hide=self.hide_col),
make_hideable(
dbc.Col([
html.Label('Interaction feature:', id='interaction-dependence-interact-col-label-'+self.name),
dbc.Tooltip("Select feature to show interaction values for. Two plots will be shown: "
"both Feature vs Interaction Feature and Interaction Feature vs Feature.",
target='interaction-dependence-interact-col-label-'+self.name),
dbc.Select(id='interaction-dependence-interact-col-'+self.name,
options=[{'label': col, 'value':col}
for col in self.explainer.shap_top_interactions(col=self.col, cats=self.cats)],
value=self.interact_col
),
], md=3), hide=self.hide_interact_col),
make_hideable(
dbc.Col([
dbc.Label(f"{self.explainer.index_name}:", id='interaction-dependence-index-label-'+self.name),
dbc.Tooltip(f"Select {self.explainer.index_name} to highlight in the plot."
"You can also select by clicking on a scatter marker in the accompanying"
" shap interaction summary plot (detailed).",
target='interaction-dependence-index-label-'+self.name),
dcc.Dropdown(id='interaction-dependence-index-'+self.name,
options = [{'label': str(idx), 'value':idx}
for idx in self.explainer.idxs],
value=self.index)
], md=4), hide=self.hide_index),
], form=True),
dbc.Row([
dbc.Col([
make_hideable(
dcc.Loading(id='loading-interaction-dependence-graph-'+self.name,
children=[dcc.Graph(id='interaction-dependence-graph-'+self.name,
config=dict(modeBarButtons=[['toImage']], displaylogo=False))]),
hide=self.hide_top),
]),
]),
dbc.Row([
dbc.Col([
make_hideable(
dcc.Loading(id='loading-reverse-interaction-graph-'+self.name,
children=[dcc.Graph(id='interaction-dependence-reverse-graph-'+self.name,
config=dict(modeBarButtons=[['toImage']], displaylogo=False))]),
hide=self.hide_bottom),
]),
]),
]),
])
def component_callbacks(self, app):
@app.callback(
Output('interaction-dependence-col-'+self.name, 'options'),
[Input('interaction-dependence-group-cats-'+self.name, 'value'),
Input('pos-label-'+self.name, 'value')])
        def update_interaction_dependence_col(cats, pos_label):
new_cols = self.explainer.columns_ranked_by_shap(bool(cats), pos_label=pos_label)
new_col_options = [{'label': col, 'value':col} for col in new_cols]
return new_col_options
@app.callback(
Output('interaction-dependence-interact-col-'+self.name, 'options'),
[Input('interaction-dependence-col-'+self.name, 'value'),
Input('pos-label-'+self.name, 'value')],
[State('interaction-dependence-group-cats-'+self.name, 'value'),
State('interaction-dependence-interact-col-'+self.name, 'value')])
def update_interaction_dependence_interact_col(col, pos_label, cats, old_interact_col):
if col is not None:
new_interact_cols = self.explainer.shap_top_interactions(
col, cats=bool(cats), pos_label=pos_label)
new_interact_options = [{'label': col, 'value':col} for col in new_interact_cols]
return new_interact_options
raise PreventUpdate
@app.callback(
[Output('interaction-dependence-graph-'+self.name, 'figure'),
Output('interaction-dependence-reverse-graph-'+self.name, 'figure')],
[Input('interaction-dependence-interact-col-'+self.name, 'value'),
Input('interaction-dependence-index-'+self.name, 'value'),
Input('pos-label-'+self.name, 'value'),
Input('interaction-dependence-col-'+self.name, 'value')])
def update_dependence_graph(interact_col, index, pos_label, col):
if col is not None and interact_col is not None:
return (self.explainer.plot_shap_interaction(
col, interact_col, highlight_index=index, pos_label=pos_label),
self.explainer.plot_shap_interaction(
interact_col, col, highlight_index=index, pos_label=pos_label))
raise PreventUpdate
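

# Usage sketch (assumption): both directions of a single interaction pair are
# plotted by default; hide_bottom=True keeps only col vs interact_col.
# "Age" and "Sex" are hypothetical feature names.
def _example_interaction_dependence(explainer):
    return InteractionDependenceComponent(explainer, col="Age",
                                          interact_col="Sex", hide_bottom=True)
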
class InteractionSummaryDependenceConnector(ExplainerComponent):
def __init__(self, interaction_summary_component, interaction_dependence_component):
"""Connects a InteractionSummaryComponent with an InteractionDependenceComponent:
- When group cats in Summary, then group cats in Dependence
- When select feature in summary, then select col in Dependence
- When clicking on interaction feature in Summary, then select that interaction
feature in Dependence.
Args:
shap_summary_component (ShapSummaryComponent): ShapSummaryComponent
shap_dependence_component (ShapDependenceComponent): ShapDependenceComponent
"""
self.sum_name = interaction_summary_component.name
self.dep_name = interaction_dependence_component.name
def component_callbacks(self, app):
@app.callback(
Output('interaction-dependence-group-cats-'+self.dep_name, 'value'),
[Input('interaction-summary-group-cats-'+self.sum_name, 'value')])
def update_dependence_shap_scatter_graph(cats):
return cats
@app.callback(
[Output('interaction-dependence-col-'+self.dep_name, 'value'),
Output('interaction-dependence-index-'+self.dep_name, 'value'),
Output('interaction-dependence-interact-col-'+self.dep_name, 'value')],
[Input('interaction-summary-col-'+self.sum_name, 'value'),
Input('interaction-summary-graph-'+self.sum_name, 'clickData')])
def update_interact_col_highlight(col, clickdata):
if clickdata is not None and clickdata['points'][0] is not None:
if isinstance(clickdata['points'][0]['y'], float): # detailed
index = clickdata['points'][0]['text'].split('=')[1].split('<br>')[0]
interact_col = clickdata['points'][0]['text'].split('=')[1].split('<br>')[1]
return (col, index, interact_col)
elif isinstance(clickdata['points'][0]['y'], str): # aggregate
# in aggregate clickdata returns col name -> type==str
interact_col = clickdata['points'][0]['y'].split(' ')[1]
return (col, dash.no_update, interact_col)
else:
return (col, dash.no_update, dash.no_update)
raise PreventUpdate
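

# Wiring sketch (assumption): same composite pattern as the
# ShapSummaryDependenceConnector example above; summary, dependence and
# connector must all live in the same dashboard for the linking to work.
class _ExampleInteractionsPage(ExplainerComponent):
    def __init__(self, explainer):
        super().__init__(explainer, title="Interactions")
        self.summary = InteractionSummaryComponent(explainer)
        self.dependence = InteractionDependenceComponent(explainer, hide_selector=True)
        self.connector = InteractionSummaryDependenceConnector(self.summary, self.dependence)
        self.register_components(self.summary, self.dependence, self.connector)

    def layout(self):
        return html.Div([self.summary.layout(), self.dependence.layout()])
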
class ShapContributionsGraphComponent(ExplainerComponent):
def __init__(self, explainer, title="Contributions Plot", name=None,
subtitle="How has each feature contributed to the prediction?",
hide_title=False, hide_subtitle=False, hide_index=False, hide_depth=False,
hide_sort=False, hide_orientation=True, hide_cats=False,
hide_selector=False, feature_input_component=None,
pos_label=None, index=None, depth=None, sort='high-to-low',
orientation='vertical', cats=True, higher_is_better=True,
description=None, **kwargs):
"""Display Shap contributions to prediction graph component
Args:
            explainer (Explainer): explainer object constructed with either
ClassifierExplainer() or RegressionExplainer()
title (str, optional): Title of tab or page. Defaults to
"Contributions Plot".
name (str, optional): unique name to add to Component elements.
If None then random uuid is generated to make sure
it's unique. Defaults to None.
subtitle (str): subtitle
hide_title (bool, optional): Hide component title. Defaults to False.
hide_subtitle (bool, optional): Hide subtitle. Defaults to False.
hide_index (bool, optional): Hide index selector. Defaults to False.
hide_depth (bool, optional): Hide depth toggle. Defaults to False.
hide_sort (bool, optional): Hide the sorting dropdown. Defaults to False.
hide_orientation (bool, optional): Hide the orientation dropdown.
Defaults to True.
hide_cats (bool, optional): Hide group cats toggle. Defaults to False.
hide_selector (bool, optional): hide pos label selector. Defaults to False.
feature_input_component (FeatureInputComponent): A FeatureInputComponent
that will give the input to the graph instead of the index selector.
If not None, hide_index=True. Defaults to None.
pos_label ({int, str}, optional): initial pos label.
Defaults to explainer.pos_label
            index ({int, str}, optional): Initial index to display. Defaults to None.
depth (int, optional): Initial number of features to display. Defaults to None.
sort ({'abs', 'high-to-low', 'low-to-high', 'importance'}, optional): sorting of shap values.
Defaults to 'high-to-low'.
orientation ({'vertical', 'horizontal'}, optional): orientation of bar chart.
Defaults to 'vertical'.
cats (bool, optional): Group cats. Defaults to True.
            higher_is_better (bool, optional): Color positive shap values green and
                negative shap values red, or the reverse. Defaults to True.
description (str, optional): Tooltip to display when hover over
component title. When None default text is shown.
"""
super().__init__(explainer, title, name)
self.index_name = 'contributions-graph-index-'+self.name
if self.depth is not None:
self.depth = min(self.depth, self.explainer.n_features(self.cats))
if not self.explainer.cats:
self.hide_cats = True
if self.feature_input_component is not None:
self.exclude_callbacks(self.feature_input_component)
self.hide_index = True
if self.description is None: self.description = """
This plot shows the contribution that each individual feature has had
on the prediction for a specific observation. The contributions (starting
from the population average) add up to the final prediction. This allows you
to explain exactly how each individual prediction has been built up
from all the individual ingredients in the model.
"""
self.selector = PosLabelSelector(explainer, name=self.name, pos_label=pos_label)
self.register_dependencies('shap_values', 'shap_values_cats')
def layout(self):
return dbc.Card([
make_hideable(
dbc.CardHeader([
html.Div([
html.H3(self.title, id='contributions-graph-title-'+self.name),
make_hideable(html.H6(self.subtitle, className='card-subtitle'), hide=self.hide_subtitle),
dbc.Tooltip(self.description, target='contributions-graph-title-'+self.name),
]),
]), hide=self.hide_title),
dbc.CardBody([
dbc.Row([
make_hideable(
dbc.Col([self.selector.layout()
], md=2), hide=self.hide_selector),
], justify="right"),
dbc.Row([
make_hideable(
dbc.Col([
dbc.Label(f"{self.explainer.index_name}:", id='contributions-graph-index-label-'+self.name),
dbc.Tooltip(f"Select the {self.explainer.index_name} to display the feature contributions for",
target='contributions-graph-index-label-'+self.name),
dcc.Dropdown(id='contributions-graph-index-'+self.name,
options = [{'label': str(idx), 'value':idx}
for idx in self.explainer.idxs],
value=self.index)
], md=4), hide=self.hide_index),
make_hideable(
dbc.Col([
dbc.Label("Depth:", id='contributions-graph-depth-label-'+self.name),
dbc.Tooltip("Number of features to display",
target='contributions-graph-depth-label-'+self.name),
dbc.Select(id='contributions-graph-depth-'+self.name,
options = [{'label': str(i+1), 'value':i+1}
for i in range(self.explainer.n_features(self.cats))],
value=None if self.depth is None else str(self.depth))
], md=2), hide=self.hide_depth),
make_hideable(
dbc.Col([
dbc.Label("Sorting:", id='contributions-graph-sorting-label-'+self.name),
dbc.Tooltip("Sort the features either by highest absolute (positive or negative) impact (absolute), "
"from most positive the most negative (high-to-low)"
"from most negative to most positive (low-to-high or "
"according the global feature importance ordering (importance).",
target='contributions-graph-sorting-label-'+self.name),
dbc.Select(id='contributions-graph-sorting-'+self.name,
options = [{'label': 'Absolute', 'value': 'abs'},
{'label': 'High to Low', 'value': 'high-to-low'},
{'label': 'Low to High', 'value': 'low-to-high'},
{'label': 'Importance', 'value': 'importance'}],
value=self.sort)
], md=2), hide=self.hide_sort),
make_hideable(
dbc.Col([
dbc.Label("Orientation:", id='contributions-graph-orientation-label-'+self.name),
dbc.Tooltip("Show vertical bars left to right or horizontal bars from top to bottom",
target='contributions-graph-orientation-label-'+self.name),
dbc.Select(id='contributions-graph-orientation-'+self.name,
options = [{'label': 'Vertical', 'value': 'vertical'},
{'label': 'Horizontal', 'value': 'horizontal'}],
value=self.orientation)
], md=2), hide=self.hide_orientation),
make_hideable(
dbc.Col([
dbc.FormGroup([
dbc.Label("Grouping:", id='contributions-graph-group-cats-label-'+self.name),
dbc.Tooltip("Group onehot encoded categorical variables together",
target='contributions-graph-group-cats-label-'+self.name),
dbc.Checklist(
options=[{"label": "Group cats", "value": True}],
value=[True] if self.cats else [],
id='contributions-graph-group-cats-'+self.name,
inline=True,
switch=True,
),
]),
], md=2), hide=self.hide_cats),
], form=True),
dbc.Row([
dbc.Col([
dcc.Loading(id='loading-contributions-graph-'+self.name,
children=[dcc.Graph(id='contributions-graph-'+self.name,
config=dict(modeBarButtons=[['toImage']], displaylogo=False))]),
]),
]),
]),
])
def component_callbacks(self, app):
if self.feature_input_component is None:
@app.callback(
[Output('contributions-graph-'+self.name, 'figure'),
Output('contributions-graph-depth-'+self.name, 'options')],
[Input('contributions-graph-index-'+self.name, 'value'),
Input('contributions-graph-depth-'+self.name, 'value'),
Input('contributions-graph-sorting-'+self.name, 'value'),
Input('contributions-graph-orientation-'+self.name, 'value'),
Input('contributions-graph-group-cats-'+self.name, 'value'),
Input('pos-label-'+self.name, 'value')])
def update_output_div(index, depth, sort, orientation, cats, pos_label):
if index is None:
raise PreventUpdate
depth = None if depth is None else int(depth)
plot = self.explainer.plot_shap_contributions(index, topx=depth,
cats=bool(cats), sort=sort, orientation=orientation,
pos_label=pos_label, higher_is_better=self.higher_is_better)
ctx = dash.callback_context
trigger = ctx.triggered[0]['prop_id'].split('.')[0]
if trigger == 'contributions-graph-group-cats-'+self.name:
depth_options = [{'label': str(i+1), 'value': i+1}
for i in range(self.explainer.n_features(bool(cats)))]
return (plot, depth_options)
else:
return (plot, dash.no_update)
else:
@app.callback(
[Output('contributions-graph-'+self.name, 'figure'),
Output('contributions-graph-depth-'+self.name, 'options')],
[Input('contributions-graph-depth-'+self.name, 'value'),
Input('contributions-graph-sorting-'+self.name, 'value'),
Input('contributions-graph-orientation-'+self.name, 'value'),
Input('contributions-graph-group-cats-'+self.name, 'value'),
Input('pos-label-'+self.name, 'value'),
*self.feature_input_component._feature_callback_inputs])
def update_output_div(depth, sort, orientation, cats, pos_label, *inputs):
depth = None if depth is None else int(depth)
X_row = self.explainer.get_row_from_input(inputs, ranked_by_shap=True)
plot = self.explainer.plot_shap_contributions(X_row=X_row,
topx=depth, cats=bool(cats), sort=sort, orientation=orientation,
pos_label=pos_label, higher_is_better=self.higher_is_better)
ctx = dash.callback_context
trigger = ctx.triggered[0]['prop_id'].split('.')[0]
if trigger == 'contributions-graph-group-cats-'+self.name:
depth_options = [{'label': str(i+1), 'value': i+1}
for i in range(self.explainer.n_features(bool(cats)))]
return (plot, depth_options)
else:
return (plot, dash.no_update)
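

# Usage sketch (assumption): driving the contributions graph from a "what-if"
# feature input form instead of the index dropdown. FeatureInputComponent and
# its import path follow the explainerdashboard API this file mirrors; passing
# it as feature_input_component automatically sets hide_index=True.
def _example_contributions_whatif(explainer):
    from explainerdashboard import ExplainerDashboard
    from explainerdashboard.custom import FeatureInputComponent
    inputs = FeatureInputComponent(explainer)
    graph = ShapContributionsGraphComponent(
        explainer, feature_input_component=inputs, sort="importance")
    ExplainerDashboard(explainer, [inputs, graph]).run()
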
class ShapContributionsTableComponentPerso(ExplainerComponent):
def __init__(self, explainer, title="Contributions Table", name=None,
subtitle="How has each feature contributed to the prediction?",
hide_title=False, hide_subtitle=False, hide_index=False,
hide_depth=False, hide_sort=False, hide_cats=False,
hide_selector=False, feature_input_component=None,
pos_label=None, index=None, depth=None, sort='abs', cats=True,
description=None, **kwargs):
"""Show SHAP values contributions to prediction in a table component
Args:
explainer (Explainer): explainer object constructed with either
ClassifierExplainer() or RegressionExplainer()
title (str, optional): Title of tab or page. Defaults to
"Contributions Table".
name (str, optional): unique name to add to Component elements.
If None then random uuid is generated to make sure
it's unique. Defaults to None.
subtitle (str): subtitle
hide_title (bool, optional): Hide component title. Defaults to False.
hide_subtitle (bool, optional): Hide subtitle. Defaults to False.
hide_index (bool, optional): Hide index selector. Defaults to False.
hide_depth (bool, optional): Hide depth selector. Defaults to False.
            hide_sort (bool, optional): Hide sorting dropdown. Defaults to False.
hide_cats (bool, optional): Hide group cats toggle. Defaults to False.
hide_selector (bool, optional): hide pos label selector. Defaults to False.
feature_input_component (FeatureInputComponent): A FeatureInputComponent
that will give the input to the graph instead of the index selector.
If not None, hide_index=True. Defaults to None.
pos_label ({int, str}, optional): initial pos label.
Defaults to explainer.pos_label
            index ({int, str}, optional): Initial index to display. Defaults to None.
            depth (int, optional): Initial number of features to display. Defaults to None.
            sort ({'abs', 'high-to-low', 'low-to-high', 'importance'}, optional): sorting of shap values.
                Defaults to 'abs'.
cats (bool, optional): Group categoricals. Defaults to True.
description (str, optional): Tooltip to display when hover over
component title. When None default text is shown.
"""
super().__init__(explainer, title, name)
self.index_name = 'contributions-table-index-'+self.name
if self.depth is not None:
self.depth = min(self.depth, self.explainer.n_features(self.cats))
if not self.explainer.cats:
self.hide_cats = True
if self.feature_input_component is not None:
self.exclude_callbacks(self.feature_input_component)
self.hide_index = True
if self.description is None: self.description = """
        This table shows the contribution that each individual feature has had
on the prediction for a specific observation. The contributions (starting
from the population average) add up to the final prediction. This allows you
to explain exactly how each individual prediction has been built up
from all the individual ingredients in the model.
"""
self.selector = PosLabelSelector(explainer, name=self.name, pos_label=pos_label)
self.register_dependencies('shap_values', 'shap_values_cats')
def layout(self):
return dbc.Card([
make_hideable(
dbc.CardHeader([
html.Div([
html.H3(self.title, id='contributions-table-title-'+self.name),
make_hideable(html.H6(self.subtitle, className='card-subtitle'), hide=self.hide_subtitle),
dbc.Tooltip(self.description, target='contributions-table-title-'+self.name),
]),
]), hide=self.hide_title),
dbc.CardBody([
dbc.Row([
make_hideable(
dbc.Col([
dbc.Label(f"{self.explainer.index_name}:", id='contributions-table-index-label-'+self.name),
dbc.Tooltip(f"Select the {self.explainer.index_name} to display the feature contributions for",
target='contributions-table-index-label-'+self.name),
dcc.Dropdown(id='contributions-table-index-'+self.name,
options = [{'label': str(idx), 'value':idx}
for idx in self.explainer.idxs],
value=self.index)
], md=4), hide=self.hide_index),
make_hideable(
dbc.Col([dbc.Form([dbc.FormGroup([
dbc.Label("Sorting:", id='contributions-table-sorting-label-'+self.name,style=dict(marginLeft= 40)),
dbc.Tooltip("Sort the features either by highest absolute (positive or negative) impact (absolute), "
"from most positive the most negative (high-to-low)"
"from most negative to most positive (low-to-high or "
"according the global feature importance ordering (importance).",
target='contributions-table-sorting-label-'+self.name),
dbc.Select(id='contributions-table-sorting-'+self.name,
options = [{'label': 'Absolute', 'value': 'abs'},
{'label': 'High to Low', 'value': 'high-to-low'},
{'label': 'Low to High', 'value': 'low-to-high'},
{'label': 'Importance', 'value': 'importance'}],
value=self.sort)
] ,className="mr-3") ],inline=True,) ], md=6), hide=self.hide_depth),
make_hideable(
dbc.Col([dbc.Form([dbc.FormGroup([
dbc.Label("Depth:", id='contributions-table-depth-label-'+self.name, ),
dbc.Tooltip("Number of features to display",
target='contributions-table-depth-label-'+self.name),
dbc.Select(id='contributions-table-depth-'+self.name,
options = [{'label': str(i+1), 'value':i+1}
for i in range(self.explainer.n_features(self.cats))],
value=self.depth)
] ,className="mr-3") ],inline=True,)], md=2), hide=self.hide_sort),
make_hideable(
dbc.Col([self.selector.layout()
], width=2), hide=self.hide_selector),
make_hideable(
dbc.Col([
dbc.FormGroup([
dbc.Label("Grouping:", id='contributions-table-group-cats-label-'+self.name),
dbc.Tooltip("Group onehot encoded categorical variables together",
target='contributions-table-group-cats-label-'+self.name),
dbc.Checklist(
options=[{"label": "Group cats", "value": True}],
value=[True] if self.cats else [],
id='contributions-table-group-cats-'+self.name,
inline=True,
switch=True,
),
]),
], md=3), hide=self.hide_cats),
], form=True),
dbc.Row([
dbc.Col([
html.Br(),
html.Div(id='contributions-table-'+self.name)
]),
]),
], style={"height":"710px","overflow":"auto","margin-bottom":"0px"}),
])
def component_callbacks(self, app):
if self.feature_input_component is None:
@app.callback(
[Output('contributions-table-'+self.name, 'children'),
Output('contributions-table-depth-'+self.name, 'options')],
[Input('contributions-table-index-'+self.name, 'value'),
Input('contributions-table-depth-'+self.name, 'value'),
Input('contributions-table-sorting-'+self.name, 'value'),
Input('contributions-table-group-cats-'+self.name, 'value'),
Input('pos-label-'+self.name, 'value')])
def update_output_div(index, depth, sort, cats, pos_label):
if index is None:
raise PreventUpdate
depth = None if depth is None else int(depth)
contributions_table = dbc.Table.from_dataframe(
self.explainer.contrib_summary_df(index, cats=bool(cats), topx=depth,
sort=sort, pos_label=pos_label))
tooltip_cols = {}
for tr in contributions_table.children[1].children:
# insert tooltip target id's into the table html.Tr() elements:
tds = tr.children
col = tds[0].children.split(" = ")[0]
if self.explainer.description(col) != "":
tr.id = f"contributions-table-hover-{col}-"+self.name
tooltip_cols[col] = self.explainer.description(col)
tooltips = [dbc.Tooltip(desc,
target=f"contributions-table-hover-{col}-"+self.name,
placement="top") for col, desc in tooltip_cols.items()]
output_div = html.Div([contributions_table, *tooltips])
ctx = dash.callback_context
trigger = ctx.triggered[0]['prop_id'].split('.')[0]
if trigger == 'contributions-table-group-cats-'+self.name:
depth_options = [{'label': str(i+1), 'value': i+1}
for i in range(self.explainer.n_features(bool(cats)))]
return (output_div, depth_options)
else:
return (output_div, dash.no_update)
else:
@app.callback(
[Output('contributions-table-'+self.name, 'children'),
Output('contributions-table-depth-'+self.name, 'options')],
[Input('contributions-table-depth-'+self.name, 'value'),
Input('contributions-table-sorting-'+self.name, 'value'),
Input('contributions-table-group-cats-'+self.name, 'value'),
Input('pos-label-'+self.name, 'value'),
*self.feature_input_component._feature_callback_inputs])
def update_output_div(depth, sort, cats, pos_label, *inputs):
X_row = self.explainer.get_row_from_input(inputs, ranked_by_shap=True)
depth = None if depth is None else int(depth)
contributions_table = dbc.Table.from_dataframe(
self.explainer.contrib_summary_df(X_row=X_row, cats=bool(cats), topx=depth,
sort=sort, pos_label=pos_label))
tooltip_cols = {}
for tr in contributions_table.children[1].children:
# insert tooltip target id's into the table html.Tr() elements:
tds = tr.children
col = tds[0].children.split(" = ")[0]
if self.explainer.description(col) != "":
tr.id = f"contributions-table-hover-{col}-"+self.name
tooltip_cols[col] = self.explainer.description(col)
tooltips = [dbc.Tooltip(desc,
target=f"contributions-table-hover-{col}-"+self.name,
placement="top") for col, desc in tooltip_cols.items()]
output_div = html.Div([contributions_table, *tooltips])
ctx = dash.callback_context
trigger = ctx.triggered[0]['prop_id'].split('.')[0]
if trigger == 'contributions-table-group-cats-'+self.name:
depth_options = [{'label': str(i+1), 'value': i+1}
for i in range(self.explainer.n_features(bool(cats)))]
return (output_div, depth_options)
else:
return (output_div, dash.no_update)
class ShapContributionsTableComponent(ExplainerComponent):
def __init__(self, explainer, title="Contributions Table", name=None,
subtitle="How has each feature contributed to the prediction?",
hide_title=False, hide_subtitle=False, hide_index=False,
hide_depth=False, hide_sort=False, hide_cats=False,
hide_selector=False, feature_input_component=None,
pos_label=None, index=None, depth=None, sort='abs', cats=True,
description=None, **kwargs):
"""Show SHAP values contributions to prediction in a table component
Args:
explainer (Explainer): explainer object constructed with either
ClassifierExplainer() or RegressionExplainer()
title (str, optional): Title of tab or page. Defaults to
"Contributions Table".
name (str, optional): unique name to add to Component elements.
If None then random uuid is generated to make sure
it's unique. Defaults to None.
subtitle (str): subtitle
hide_title (bool, optional): Hide component title. Defaults to False.
hide_subtitle (bool, optional): Hide subtitle. Defaults to False.
hide_index (bool, optional): Hide index selector. Defaults to False.
hide_depth (bool, optional): Hide depth selector. Defaults to False.
            hide_sort (bool, optional): Hide sorting dropdown. Defaults to False.
hide_cats (bool, optional): Hide group cats toggle. Defaults to False.
hide_selector (bool, optional): hide pos label selector. Defaults to False.
feature_input_component (FeatureInputComponent): A FeatureInputComponent
that will give the input to the graph instead of the index selector.
If not None, hide_index=True. Defaults to None.
pos_label ({int, str}, optional): initial pos label.
Defaults to explainer.pos_label
            index ({int, str}, optional): Initial index to display. Defaults to None.
            depth (int, optional): Initial number of features to display. Defaults to None.
            sort ({'abs', 'high-to-low', 'low-to-high', 'importance'}, optional): sorting of shap values.
                Defaults to 'abs'.
cats (bool, optional): Group categoricals. Defaults to True.
description (str, optional): Tooltip to display when hover over
component title. When None default text is shown.
"""
super().__init__(explainer, title, name)
self.index_name = 'contributions-table-index-'+self.name
if self.depth is not None:
self.depth = min(self.depth, self.explainer.n_features(self.cats))
if not self.explainer.cats:
self.hide_cats = True
if self.feature_input_component is not None:
self.exclude_callbacks(self.feature_input_component)
self.hide_index = True
if self.description is None: self.description = """
        This table shows the contribution that each individual feature has had
on the prediction for a specific observation. The contributions (starting
from the population average) add up to the final prediction. This allows you
to explain exactly how each individual prediction has been built up
from all the individual ingredients in the model.
"""
self.selector = PosLabelSelector(explainer, name=self.name, pos_label=pos_label)
self.register_dependencies('shap_values', 'shap_values_cats')
def layout(self):
return dbc.Card([
make_hideable(
dbc.CardHeader([
html.Div([
html.H3(self.title, id='contributions-table-title-'+self.name),
make_hideable(html.H6(self.subtitle, className='card-subtitle'), hide=self.hide_subtitle),
dbc.Tooltip(self.description, target='contributions-table-title-'+self.name),
]),
]), hide=self.hide_title),
dbc.CardBody([
dbc.Row([
make_hideable(
dbc.Col([
dbc.Label(f"{self.explainer.index_name}:", id='contributions-table-index-label-'+self.name),
dbc.Tooltip(f"Select the {self.explainer.index_name} to display the feature contributions for",
target='contributions-table-index-label-'+self.name),
dcc.Dropdown(id='contributions-table-index-'+self.name,
options = [{'label': str(idx), 'value':idx}
for idx in self.explainer.idxs],
value=self.index)
], md=4), hide=self.hide_index),
make_hideable(
dbc.Col([
dbc.Label("Depth:", id='contributions-table-depth-label-'+self.name),
dbc.Tooltip("Number of features to display",
target='contributions-table-depth-label-'+self.name),
dbc.Select(id='contributions-table-depth-'+self.name,
options = [{'label': str(i+1), 'value':i+1}
for i in range(self.explainer.n_features(self.cats))],
value=self.depth)
], md=2), hide=self.hide_depth),
make_hideable(
dbc.Col([
dbc.Label("Sorting:", id='contributions-table-sorting-label-'+self.name),
dbc.Tooltip("Sort the features either by highest absolute (positive or negative) impact (absolute), "
"from most positive the most negative (high-to-low)"
"from most negative to most positive (low-to-high or "
"according the global feature importance ordering (importance).",
target='contributions-table-sorting-label-'+self.name),
dbc.Select(id='contributions-table-sorting-'+self.name,
options = [{'label': 'Absolute', 'value': 'abs'},
{'label': 'High to Low', 'value': 'high-to-low'},
{'label': 'Low to High', 'value': 'low-to-high'},
{'label': 'Importance', 'value': 'importance'}],
value=self.sort)
], md=2), hide=self.hide_sort),
make_hideable(
dbc.Col([self.selector.layout()
], width=2), hide=self.hide_selector),
make_hideable(
dbc.Col([
dbc.FormGroup([
dbc.Label("Grouping:", id='contributions-table-group-cats-label-'+self.name),
dbc.Tooltip("Group onehot encoded categorical variables together",
target='contributions-table-group-cats-label-'+self.name),
dbc.Checklist(
options=[{"label": "Group cats", "value": True}],
value=[True] if self.cats else [],
id='contributions-table-group-cats-'+self.name,
inline=True,
switch=True,
),
]),
], md=3), hide=self.hide_cats),
], form=True),
dbc.Row([
dbc.Col([
html.Div(id='contributions-table-'+self.name)
]),
]),
]),
])
def component_callbacks(self, app):
if self.feature_input_component is None:
@app.callback(
[Output('contributions-table-'+self.name, 'children'),
Output('contributions-table-depth-'+self.name, 'options')],
[Input('contributions-table-index-'+self.name, 'value'),
Input('contributions-table-depth-'+self.name, 'value'),
Input('contributions-table-sorting-'+self.name, 'value'),
Input('contributions-table-group-cats-'+self.name, 'value'),
Input('pos-label-'+self.name, 'value')])
def update_output_div(index, depth, sort, cats, pos_label):
if index is None:
raise PreventUpdate
depth = None if depth is None else int(depth)
contributions_table = dbc.Table.from_dataframe(
self.explainer.contrib_summary_df(index, cats=bool(cats), topx=depth,
sort=sort, pos_label=pos_label))
tooltip_cols = {}
for tr in contributions_table.children[1].children:
# insert tooltip target id's into the table html.Tr() elements:
tds = tr.children
col = tds[0].children.split(" = ")[0]
if self.explainer.description(col) != "":
tr.id = f"contributions-table-hover-{col}-"+self.name
tooltip_cols[col] = self.explainer.description(col)
tooltips = [dbc.Tooltip(desc,
target=f"contributions-table-hover-{col}-"+self.name,
placement="top") for col, desc in tooltip_cols.items()]
output_div = html.Div([contributions_table, *tooltips])
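                # Only rebuild the depth dropdown options when the grouping
                # toggle fired; otherwise return dash.no_update so the
                # dropdown keeps its state and no circular update is fired.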
ctx = dash.callback_context
trigger = ctx.triggered[0]['prop_id'].split('.')[0]
if trigger == 'contributions-table-group-cats-'+self.name:
depth_options = [{'label': str(i+1), 'value': i+1}
for i in range(self.explainer.n_features(bool(cats)))]
return (output_div, depth_options)
else:
return (output_div, dash.no_update)
else:
@app.callback(
[Output('contributions-table-'+self.name, 'children'),
Output('contributions-table-depth-'+self.name, 'options')],
[Input('contributions-table-depth-'+self.name, 'value'),
Input('contributions-table-sorting-'+self.name, 'value'),
Input('contributions-table-group-cats-'+self.name, 'value'),
Input('pos-label-'+self.name, 'value'),
*self.feature_input_component._feature_callback_inputs])
def update_output_div(depth, sort, cats, pos_label, *inputs):
X_row = self.explainer.get_row_from_input(inputs, ranked_by_shap=True)
depth = None if depth is None else int(depth)
contributions_table = dbc.Table.from_dataframe(
self.explainer.contrib_summary_df(X_row=X_row, cats=bool(cats), topx=depth,
sort=sort, pos_label=pos_label))
tooltip_cols = {}
for tr in contributions_table.children[1].children:
# insert tooltip target id's into the table html.Tr() elements:
tds = tr.children
col = tds[0].children.split(" = ")[0]
if self.explainer.description(col) != "":
tr.id = f"contributions-table-hover-{col}-"+self.name
tooltip_cols[col] = self.explainer.description(col)
tooltips = [dbc.Tooltip(desc,
target=f"contributions-table-hover-{col}-"+self.name,
placement="top") for col, desc in tooltip_cols.items()]
output_div = html.Div([contributions_table, *tooltips])
ctx = dash.callback_context
trigger = ctx.triggered[0]['prop_id'].split('.')[0]
if trigger == 'contributions-table-group-cats-'+self.name:
depth_options = [{'label': str(i+1), 'value': i+1}
for i in range(self.explainer.n_features(bool(cats)))]
return (output_div, depth_options)
else:
                    return (output_div, dash.no_update)

# ==== End of AMLBID/Explainer/dashboard_components/shap_components.py ====
__all__ = [
'ImportancesComposite',
'ClassifierModelStatsComposite',
'RegressionModelStatsComposite',
'IndividualPredictionsComposite',
'ShapDependenceComposite',
'ShapInteractionsComposite',
'DecisionTreesComposite',
'WhatIfComposite',
'Testcomposite',
'SuggestedModelComposite',
'RefinementComposite',
]
import os.path
import pandas as pd
import dash_bootstrap_components as dbc
import dash_html_components as html
import dash_core_components as dcc
import dash
from ..AMLBID_Explainer import RandomForestExplainer, XGBExplainer
from ..dashboard_methods import *
from .classifier_components import *
from .regression_components import *
from .overview_components import *
from .connectors import *
from .shap_components import *
from .decisiontree_components import *
from dash.dependencies import Input, Output, State
from .ConfGenerator import *
class ImportancesComposite(ExplainerComponent):
def __init__(self, explainer, title="Feature Importances", name=None,
hide_importances=False,
hide_selector=True, **kwargs):
"""Overview tab of feature importances
Can show both permutation importances and mean absolute shap values.
Args:
explainer (Explainer): explainer object constructed with either
ClassifierExplainer() or RegressionExplainer()
title (str, optional): Title of tab or page. Defaults to
"Feature Importances".
name (str, optional): unique name to add to Component elements.
If None then random uuid is generated to make sure
it's unique. Defaults to None.
hide_importances (bool, optional): hide the ImportancesComponent
            hide_selector (bool, optional): hide the pos label selector.
Defaults to True.
"""
super().__init__(explainer, title, name)
#self.importances = ImportancesComponent(
#explainer, name=self.name+"0", hide_selector=hide_selector, **kwargs)
self.shap_summary = ShapSummaryComponent(explainer,name=self.name+"1",
hide_title=True, hide_selector=True,
hide_depth=False, depth=5,
hide_cats=True)
self.register_components()
def layout(self):
return html.Div([
dbc.Row(dbc.Col([
dbc.CardDeck([
dbc.Card([
dbc.CardHeader([
html.H4([dbc.Button("Description", id="positioned-toast-toggle", color="primary", className="mr-1")],style={"float": "right"}),
html.H3(["Feature Importances"], className="card-title"),
html.H6("Which features had the biggest impact?",className="card-subtitle")]),
dbc.CardBody([
dbc.Toast(html.Div([html.P(
"On the plot, you can check out for yourself which parameters were the most important."
f"{self.explainer.columns_ranked_by_shap(cats=True)[0]} was the most important"
f", followed by {self.explainer.columns_ranked_by_shap(cats=True)[1]}"
f" and {self.explainer.columns_ranked_by_shap(cats=True)[2]}."),
#html.Br(),
html.P("If you select 'detailed' summary type you can see the impact of that variable on "
"each individual prediction. With 'aggregate' you see the average impact size "
"of that variable on the finale prediction.")],style={"text-align": "justify"}),
id="positioned-toast",header="Feature Importances",is_open=False,dismissable=True,
style={"position": "fixed", "top": 25, "right": 10, "width": 400},),
self.shap_summary.layout()
],style=dict(marginTop= -20))
]) ])
])) ], style=dict(marginTop=25, marginBottom=25) )
def component_callbacks(self, app):
@app.callback(Output("positioned-toast", "is_open"),[Input("positioned-toast-toggle", "n_clicks")],)
def open_toast(n):
if n:
return True
return False
class ClassifierModelStatsComposite(ExplainerComponent):
def __init__(self, explainer, title="Recommendation Performances", name=None,
hide_title=True, hide_selector=True,
hide_globalcutoff=True,
hide_modelsummary=False, hide_confusionmatrix=False,
hide_precision=True, hide_classification=True,
hide_rocauc=True, hide_prauc=True,
hide_liftcurve=True, hide_cumprecision=True,hide_range=True,
pos_label=None,
bin_size=0.1, quantiles=10, cutoff=0.5, **kwargs):
"""Composite of multiple classifier related components:
- precision graph
- confusion matrix
- lift curve
- classification graph
- roc auc graph
- pr auc graph
Args:
explainer (Explainer): explainer object constructed with either
ClassifierExplainer() or RegressionExplainer()
title (str, optional): Title of tab or page. Defaults to
"Decision Trees".
name (str, optional): unique name to add to Component elements.
If None then random uuid is generated to make sure
it's unique. Defaults to None.
hide_title (bool, optional): hide title. Defaults to True.
hide_selector (bool, optional): hide all pos label selectors. Defaults to True.
hide_globalcutoff (bool, optional): hide CutoffPercentileComponent
hide_modelsummary (bool, optional): hide ClassifierModelSummaryComponent
hide_confusionmatrix (bool, optional): hide ConfusionMatrixComponent
hide_precision (bool, optional): hide PrecisionComponent
hide_classification (bool, optional): hide ClassificationComponent
hide_rocauc (bool, optional): hide RocAucComponent
hide_prauc (bool, optional): hide PrAucComponent
hide_liftcurve (bool, optional): hide LiftCurveComponent
hide_cumprecision (bool, optional): hide CumulativePrecisionComponent
pos_label ({int, str}, optional): initial pos label. Defaults to explainer.pos_label
bin_size (float, optional): bin_size for precision plot. Defaults to 0.1.
quantiles (int, optional): number of quantiles for precision plot. Defaults to 10.
cutoff (float, optional): initial cutoff. Defaults to 0.5.
"""
super().__init__(explainer, title, name)
self.summary = ClassifierModelSummaryComponent(explainer, name=self.name+"0",
hide_selector=hide_selector, pos_label=pos_label, **kwargs)
self.precision = PrecisionComponent(explainer, name=self.name+"1",
hide_selector=hide_selector, pos_label=pos_label, **kwargs)
self.confusionmatrix = ConfusionMatrixComponent(explainer, name=self.name+"2",
hide_selector=hide_selector, pos_label=pos_label, **kwargs)
self.cumulative_precision = CumulativePrecisionComponent(explainer, name=self.name+"3",
hide_selector=hide_selector, pos_label=pos_label, **kwargs)
self.liftcurve = LiftCurveComponent(explainer, name=self.name+"4",
hide_selector=hide_selector, pos_label=pos_label, **kwargs)
self.classification = ClassificationComponent(explainer, name=self.name+"5",
hide_selector=hide_selector, pos_label=pos_label, **kwargs)
self.rocauc = RocAucComponent(explainer, name=self.name+"6",
hide_selector=hide_selector, pos_label=pos_label, **kwargs)
self.prauc = PrAucComponent(explainer, name=self.name+"7",
hide_selector=hide_selector, pos_label=pos_label, **kwargs)
self.cutoffpercentile = CutoffPercentileComponent(explainer, name=self.name+"8",
hide_selector=hide_selector, pos_label=pos_label, **kwargs)
self.cutoffconnector = CutoffConnector(self.cutoffpercentile,
[self.summary, self.precision, self.confusionmatrix, self.liftcurve,
self.cumulative_precision, self.classification, self.rocauc, self.prauc])
def layout(self):
return html.Div([
dbc.Row([
make_hideable(
dbc.Col([
html.H2('Model Performance:')]), hide=self.hide_title),
]),
dbc.Row([
make_hideable(
dbc.Col([
self.cutoffpercentile.layout(),
]), hide=self.hide_globalcutoff),
], style=dict(marginBottom=25)),
dbc.CardDeck([
make_hideable(self.summary.layout(), hide=self.hide_modelsummary),
make_hideable(self.confusionmatrix.layout(), hide=self.hide_confusionmatrix),
], style=dict(marginBottom=25)),
dbc.CardDeck([
make_hideable(self.precision.layout(), hide=self.hide_precision),
make_hideable(self.classification.layout(), hide=self.hide_classification)
], style=dict(marginBottom=25)),
dbc.CardDeck([
make_hideable(self.rocauc.layout(), hide=self.hide_rocauc),
make_hideable(self.prauc.layout(), hide=self.hide_prauc),
], style=dict(marginBottom=25)),
dbc.CardDeck([
                make_hideable(self.liftcurve.layout(), hide=self.hide_liftcurve),
                make_hideable(self.cumulative_precision.layout(), hide=self.hide_cumprecision),
], style=dict(marginBottom=25)),
])
class RegressionModelStatsComposite(ExplainerComponent):
def __init__(self, explainer, title="Regression Stats", name=None,
hide_title=True, hide_modelsummary=False,
hide_predsvsactual=False, hide_residuals=False,
hide_regvscol=False,
logs=False, pred_or_actual="vs_pred", residuals='difference',
col=None, **kwargs):
"""Composite for displaying multiple regression related graphs:
- predictions vs actual plot
- residual plot
- residuals vs feature
Args:
explainer (Explainer): explainer object constructed with either
ClassifierExplainer() or RegressionExplainer()
title (str, optional): Title of tab or page. Defaults to
"Regression Stats".
name (str, optional): unique name to add to Component elements.
If None then random uuid is generated to make sure
it's unique. Defaults to None.
hide_title (bool, optional): hide title. Defaults to True.
hide_modelsummary (bool, optional): hide RegressionModelSummaryComponent
hide_predsvsactual (bool, optional): hide PredictedVsActualComponent
hide_residuals (bool, optional): hide ResidualsComponent
hide_regvscol (bool, optional): hide RegressionVsColComponent
logs (bool, optional): Use log axis. Defaults to False.
pred_or_actual (str, optional): plot residuals vs predictions
or vs y (actual). Defaults to "vs_pred".
residuals (str, {'difference', 'ratio', 'log-ratio'} optional):
                How to calculate residuals. Defaults to 'difference'.
col ({str, int}, optional): Feature to use for residuals plot. Defaults to None.
"""
super().__init__(explainer, title, name)
assert pred_or_actual in ['vs_actual', 'vs_pred'], \
"pred_or_actual should be 'vs_actual' or 'vs_pred'!"
        self.modelsummary = RegressionModelSummaryComponent(explainer,
                    name=self.name+"0", **kwargs)
        self.preds_vs_actual = PredictedVsActualComponent(explainer, name=self.name+"1",
                    logs=logs, **kwargs)
        self.residuals = ResidualsComponent(explainer, name=self.name+"2",
                    pred_or_actual=pred_or_actual, residuals=residuals, **kwargs)
        self.reg_vs_col = RegressionVsColComponent(explainer, name=self.name+"3",
                    logs=logs, **kwargs)
def layout(self):
return html.Div([
dbc.Row([
make_hideable(
dbc.Col([
html.H2('Model Performance:')]), hide=self.hide_title)
]),
dbc.CardDeck([
make_hideable(self.modelsummary.layout(), hide=self.hide_modelsummary),
make_hideable(self.preds_vs_actual.layout(), hide=self.hide_predsvsactual),
], style=dict(margin=25)),
dbc.CardDeck([
make_hideable(self.residuals.layout(), hide=self.hide_residuals),
make_hideable(self.reg_vs_col.layout(), hide=self.hide_regvscol),
], style=dict(margin=25))
])
class IndividualPredictionsComposite(ExplainerComponent):
def __init__(self, explainer, title="Individual Predictions", name=None,
hide_predindexselector=False, hide_predictionsummary=False,
hide_contributiongraph=False, hide_pdp=False,
hide_contributiontable=False,
hide_title=False, hide_selector=True, **kwargs):
"""Composite for a number of component that deal with individual predictions:
- random index selector
- prediction summary
- shap contributions graph
- shap contribution table
- pdp graph
Args:
explainer (Explainer): explainer object constructed with either
ClassifierExplainer() or RegressionExplainer()
title (str, optional): Title of tab or page. Defaults to
"Individual Predictions".
name (str, optional): unique name to add to Component elements.
If None then random uuid is generated to make sure
it's unique. Defaults to None.
hide_predindexselector (bool, optional): hide ClassifierRandomIndexComponent
or RegressionRandomIndexComponent
hide_predictionsummary (bool, optional): hide ClassifierPredictionSummaryComponent
or RegressionPredictionSummaryComponent
hide_contributiongraph (bool, optional): hide ShapContributionsGraphComponent
hide_pdp (bool, optional): hide PdpComponent
hide_contributiontable (bool, optional): hide ShapContributionsTableComponent
hide_title (bool, optional): hide title. Defaults to False.
hide_selector(bool, optional): hide all pos label selectors. Defaults to True.
"""
super().__init__(explainer, title, name)
if self.explainer.is_classifier:
self.index = ClassifierRandomIndexComponent(explainer, name=self.name+"0",
hide_selector=hide_selector, **kwargs)
self.summary = ClassifierPredictionSummaryComponent(explainer, name=self.name+"1",
hide_selector=hide_selector, **kwargs)
elif self.explainer.is_regression:
self.index = RegressionRandomIndexComponent(explainer, name=self.name+"0",
hide_selector=hide_selector, **kwargs)
self.summary = RegressionPredictionSummaryComponent(explainer, name=self.name+"1",
hide_selector=hide_selector, **kwargs)
self.contributions = ShapContributionsGraphComponent(explainer, name=self.name+"2",
hide_selector=hide_selector, **kwargs)
self.pdp = PdpComponent(explainer, name=self.name+"3",
hide_selector=hide_selector, **kwargs)
self.contributions_list = ShapContributionsTableComponent(explainer, name=self.name+"4",
hide_selector=hide_selector, **kwargs)
self.index_connector = IndexConnector(self.index,
[self.summary, self.contributions, self.pdp, self.contributions_list])
def layout(self):
return dbc.Container([
dbc.CardDeck([
make_hideable(self.index.layout(), hide=self.hide_predindexselector),
make_hideable(self.summary.layout(), hide=self.hide_predictionsummary),
], style=dict(marginBottom=25, marginTop=25)),
dbc.CardDeck([
make_hideable(self.contributions.layout(), hide=self.hide_contributiongraph),
make_hideable(self.pdp.layout(), hide=self.hide_pdp),
], style=dict(marginBottom=25, marginTop=25)),
dbc.Row([
dbc.Col([
make_hideable(self.contributions_list.layout(), hide=self.hide_contributiontable),
], md=6),
dbc.Col([
html.Div([]),
], md=6),
])
], fluid=True)
class WhatIfComposite(ExplainerComponent):
def __init__(self, explainer, title="What-If Analysis", name=None,
hide_whatifindexselector=False, hide_inputeditor=False,
hide_whatifprediction=False, hide_whatifcontributiongraph=False,
hide_whatifpdp=True, hide_whatifcontributiontable=False,
hide_title=True, hide_selector=True,
n_input_cols=4, sort='importance', **kwargs):
"""Composite for the whatif component:
Args:
explainer (Explainer): explainer object constructed with either
ClassifierExplainer() or RegressionExplainer()
title (str, optional): Title of tab or page. Defaults to
"Individual Predictions".
name (str, optional): unique name to add to Component elements.
If None then random uuid is generated to make sure
it's unique. Defaults to None.
hide_title (bool, optional): hide title. Defaults to True.
hide_selector(bool, optional): hide all pos label selectors. Defaults to True.
hide_whatifindexselector (bool, optional): hide ClassifierRandomIndexComponent
or RegressionRandomIndexComponent
hide_inputeditor (bool, optional): hide FeatureInputComponent
hide_whatifprediction (bool, optional): hide PredictionSummaryComponent
hide_whatifcontributiongraph (bool, optional): hide ShapContributionsGraphComponent
hide_whatifcontributiontable (bool, optional): hide ShapContributionsTableComponent
hide_whatifpdp (bool, optional): hide PdpComponent
n_input_cols (int, optional): number of columns to divide the feature inputs into.
Defaults to 4.
sort ({'abs', 'high-to-low', 'low-to-high', 'importance'}, optional): sorting of shap values.
Defaults to 'importance'.
"""
super().__init__(explainer, title, name)
if 'hide_whatifcontribution' in kwargs:
print("Warning: hide_whatifcontribution will be deprecated, use hide_whatifcontributiongraph instead!")
self.hide_whatifcontributiongraph = kwargs['hide_whatifcontribution']
self.input = FeatureInputComponent(explainer, name=self.name+"0",
hide_selector=hide_selector,hide_title=True, hide_subtitle=True, n_input_cols=self.n_input_cols,
**update_params(kwargs, hide_index=True))
if self.explainer.is_classifier:
self.index = ClassifierRandomIndexComponentPerso(explainer, name=self.name+"1",
hide_selector=hide_selector,hide_title=True, hide_subtitle=True,
hide_slider=True,hide_pred_or_perc=True,hide_labels=True, **kwargs)
self.prediction = ClassifierPredictionSummaryComponent(explainer, name=self.name+"2",
feature_input_component=self.input,
hide_star_explanation=True,
hide_selector=hide_selector, **kwargs)
elif self.explainer.is_regression:
pass
#self.index = RegressionRandomIndexComponent(explainer, name=self.name+"1", **kwargs)
#self.prediction = RegressionPredictionSummaryComponent(explainer, name=self.name+"2",
# feature_input_component=self.input, **kwargs)
self.contribgraph = ShapContributionsGraphComponent(explainer, name=self.name+"3",
feature_input_component=self.input,
hide_selector=hide_selector, sort=sort, **kwargs)
self.contribtable = ShapContributionsTableComponentPerso(explainer, name=self.name+"4",
feature_input_component=self.input,hide_cats=True,
hide_selector=hide_selector, sort=sort, **kwargs)
self.pdp = PdpComponent(explainer, name=self.name+"5",
feature_input_component=self.input,
hide_selector=hide_selector, **kwargs)
self.index_connector = IndexConnector(self.index, [self.input])
def layout(self):
return dbc.Container([
dbc.Row([
make_hideable(
dbc.Col([html.H1(self.title)]), hide=self.hide_title),
]),
dbc.Row([
make_hideable(
dbc.Col([
dbc.Card([
dbc.CardHeader([html.H4("Select Observation", className="card-title"),
html.H6("Select from list or pick at random", className="card-subtitle")]),
dbc.CardBody([
self.index.layout(),
html.Hr(),
self.input.layout()
],style=dict(marginTop= -20))])
], md=7), hide=self.hide_whatifindexselector),
make_hideable(
dbc.Col([
self.prediction.layout(),
], md=5), hide=self.hide_whatifprediction),
], style=dict(marginBottom=15, marginTop=15)),
dbc.CardDeck([
#make_hideable(self.contribgraph.layout(), hide=self.hide_whatifcontributiongraph),
make_hideable(self.pdp.layout(), hide=self.hide_whatifpdp),
], style=dict(marginBottom=15, marginTop=15)),
dbc.Row([
make_hideable(
dbc.Col([
self.contribtable.layout()
], md=6), hide=self.hide_whatifcontributiontable),
dbc.Col([self.contribgraph.layout()], style=dict(marginBottom=15), md=6),
])
], fluid=True)
class ShapDependenceComposite(ExplainerComponent):
def __init__(self, explainer, title='Feature Dependence', name=None,
hide_selector=True,
hide_shapsummary=False, hide_shapdependence=False,
depth=None, cats=True, **kwargs):
"""Composite of ShapSummary and ShapDependence component
Args:
explainer (Explainer): explainer object constructed with either
ClassifierExplainer() or RegressionExplainer()
title (str, optional): Title of tab or page. Defaults to
"Feature Dependence".
name (str, optional): unique name to add to Component elements.
If None then random uuid is generated to make sure
it's unique. Defaults to None.
hide_selector (bool, optional): hide all pos label selectors. Defaults to True.
hide_shapsummary (bool, optional): hide ShapSummaryComponent
hide_shapdependence (bool, optional): ShapDependenceComponent
depth (int, optional): Number of features to display. Defaults to None.
cats (bool, optional): Group categorical features. Defaults to True.
"""
super().__init__(explainer, title, name)
self.shap_summary = ShapSummaryComponent(
self.explainer, name=self.name+"0",
**update_params(kwargs, hide_selector=hide_selector, depth=depth, cats=cats))
self.shap_dependence = ShapDependenceComponent(
self.explainer, name=self.name+"1",
hide_selector=hide_selector, cats=cats,
**update_params(kwargs, hide_cats=True)
)
self.connector = ShapSummaryDependenceConnector(
self.shap_summary, self.shap_dependence)
def layout(self):
return dbc.Container([
dbc.CardDeck([
make_hideable(self.shap_summary.layout(), hide=self.hide_shapsummary),
make_hideable(self.shap_dependence.layout(), hide=self.hide_shapdependence),
], style=dict(marginTop=25)),
], fluid=True)
class ShapInteractionsComposite(ExplainerComponent):
def __init__(self, explainer, title='Feature Interactions', name=None,
hide_selector=True,
hide_interactionsummary=False, hide_interactiondependence=False,
depth=None, cats=True, **kwargs):
"""Composite of InteractionSummaryComponent and InteractionDependenceComponent
Args:
explainer (Explainer): explainer object constructed with either
ClassifierExplainer() or RegressionExplainer()
title (str, optional): Title of tab or page. Defaults to
"Feature Interactions".
name (str, optional): unique name to add to Component elements.
If None then random uuid is generated to make sure
it's unique. Defaults to None.
hide_selector (bool, optional): hide all pos label selectors. Defaults to True.
hide_interactionsummary (bool, optional): hide InteractionSummaryComponent
hide_interactiondependence (bool, optional): hide InteractionDependenceComponent
depth (int, optional): Initial number of features to display. Defaults to None.
            cats (bool, optional): Initially group cats. Defaults to True.
"""
super().__init__(explainer, title, name)
self.interaction_summary = InteractionSummaryComponent(explainer, name=self.name+"0",
hide_selector=hide_selector, depth=depth, cats=cats, **kwargs)
self.interaction_dependence = InteractionDependenceComponent(explainer, name=self.name+"1",
hide_selector=hide_selector, cats=cats, **update_params(kwargs, hide_cats=True))
self.connector = InteractionSummaryDependenceConnector(
self.interaction_summary, self.interaction_dependence)
def layout(self):
return dbc.Container([
dbc.CardDeck([
make_hideable(self.interaction_summary.layout(), hide=self.hide_interactionsummary),
make_hideable(self.interaction_dependence.layout(), hide=self.hide_interactiondependence),
], style=dict(marginTop=25))
], fluid=True)
class DecisionTreesComposite(ExplainerComponent):
def __init__(self, explainer, title="Decision Path", name=None,
hide_treeindexselector=False, hide_treesgraph=True,
hide_treepathtable=True, hide_treepathgraph=False,
hide_selector=True,n_input_cols=4, sort='importance', **kwargs):
"""Composite of decision tree related components:
- index selector
- individual decision trees barchart
- decision path table
            - decision path graph
Args:
explainer (Explainer): explainer object constructed with either
RandomForestClassifierExplainer() or RandomForestRegressionExplainer()
title (str, optional): Title of tab or page. Defaults to
"Decision Trees".
name (str, optional): unique name to add to Component elements.
If None then random uuid is generated to make sure
it's unique. Defaults to None.
hide_treeindexselector (bool, optional): hide ClassifierRandomIndexComponent
or RegressionRandomIndexComponent
hide_treesgraph (bool, optional): hide DecisionTreesComponent
hide_treepathtable (bool, optional): hide DecisionPathTableComponent
hide_treepathgraph (bool, optional): DecisionPathGraphComponent
hide_selector (bool, optional): hide all pos label selectors. Defaults to True.
"""
super().__init__(explainer, title, name)
#self.input = FeatureInputComponent(explainer, name=self.name+"4",
#hide_selector=hide_selector, n_input_cols=self.n_input_cols,hide_title=True,
#**update_params(kwargs, hide_index=True))
self.trees = DecisionTreesComponent(explainer, name=self.name+"0",
hide_selector=hide_selector, **kwargs)
self.decisionpath_table = DecisionPathTableComponent(explainer, name=self.name+"1",
hide_selector=hide_selector, **kwargs)
if explainer.is_classifier:
self.index = ClassifierRandomIndexComponentPerso(explainer, name=self.name+"2",
hide_selector=hide_selector,hide_title=True, hide_subtitle=True,
hide_slider=True,hide_pred_or_perc=True,hide_labels=True, **kwargs)
elif explainer.is_regression:
self.index = RegressionRandomIndexComponent(explainer, name=self.name+"2",
**kwargs)
self.prediction = ClassifierPredictionSummaryComponentPerso(explainer, name=self.name+"4",
hide_star_explanation=True,hide_title=True,
hide_selector=True, **kwargs)
self.decisionpath_graph = DecisionPathGraphComponent(explainer, name=self.name+"3",
hide_selector=hide_selector, **kwargs)
self.contribtable = ShapContributionsTableComponentPerso(explainer, name=self.name+"5",
hide_index=True,hide_cats=True, depth=2,
hide_selector=hide_selector, sort=sort, **kwargs)
self.index_connector = IndexConnector(self.index,
[self.trees, self.decisionpath_table, self.decisionpath_graph,self.prediction,self.contribtable] )
self.highlight_connector = HighlightConnector(self.trees,
[self.decisionpath_table, self.decisionpath_graph])
def layout(self):
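        # XGBExplainer and RandomForestExplainer get different layouts: the
        # random forest view wraps the trees graph with the observation
        # selector, prediction summary and contributions table.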
if isinstance(self.explainer, XGBExplainer):
return html.Div([
dbc.Row([
make_hideable(
dbc.Col([
self.index.layout()
]), hide=False),
], style=dict(margin=25)),
dbc.Row([
make_hideable(
dbc.Col([
self.trees.layout(),
], md=8), hide=self.hide_treesgraph),
make_hideable(
dbc.Col([
self.decisionpath_table.layout(),
], md=4), hide=True),
], style=dict(margin=25)),
dbc.Row([
make_hideable(
dbc.Col([
self.decisionpath_graph.layout()
]), hide=self.hide_treepathgraph),
], style=dict(margin=25)),
])
elif isinstance(self.explainer, RandomForestExplainer):
return html.Div([
dbc.Row([
dbc.Col([
dbc.Card([
dbc.CardHeader([html.H4("Select Observation", className="card-title"),
html.H6("Select from list or pick at random", className="card-subtitle")]),
dbc.CardBody([
self.index.layout(),
self.prediction.layout(),
],style=dict(marginTop= -20))])
                ], md=5),  # Bootstrap column widths must be between 1 and 12
dbc.Col([
self.decisionpath_graph.layout()
]),
], style=dict(margin=25, marginBottom=0)),
dbc.Row([
make_hideable(
dbc.Col([
self.trees.layout(),
]), hide=self.hide_treesgraph),
], style=dict(margin=0)),
dbc.Row([
make_hideable(
dbc.Col([
self.decisionpath_table.layout(),
]), hide=True),
], style=dict(margin=0)),
dbc.Row([
make_hideable(
dbc.Col([self.contribtable.layout()]),
hide=True),
], style=dict(marginBottom=25,marginTop=25)),
])
else:
raise ValueError("explainer is neither a RandomForestExplainer nor an XGBExplainer! "
"Pass decision_trees=False to disable the decision tree tab.")
class SuggestedModelComposite(ExplainerComponent):
def __init__(self, explainer, title="Suggested Model", name=None,
hide_title=True, hide_selector=True,
hide_globalcutoff=False,
hide_modelsummary=False, hide_confusionmatrix=False,
hide_precision=False, hide_classification=False,
hide_rocauc=False, hide_prauc=False,
hide_liftcurve=False, hide_cumprecision=False,
pos_label=None,
bin_size=0.1, quantiles=10, cutoff=0.5, **kwargs):
"""Composite of multiple classifier related components:
- precision graph
- confusion matrix
- lift curve
- classification graph
- roc auc graph
- pr auc graph
Args:
explainer (Explainer): explainer object constructed with either
ClassifierExplainer() or RegressionExplainer()
title (str, optional): Title of tab or page. Defaults to
"Decision Trees".
name (str, optional): unique name to add to Component elements.
If None then random uuid is generated to make sure
it's unique. Defaults to None.
hide_title (bool, optional): hide title. Defaults to True.
hide_selector (bool, optional): hide all pos label selectors. Defaults to True.
hide_globalcutoff (bool, optional): hide CutoffPercentileComponent
hide_modelsummary (bool, optional): hide ClassifierModelSummaryComponent
hide_confusionmatrix (bool, optional): hide ConfusionMatrixComponent
hide_precision (bool, optional): hide PrecisionComponent
hide_classification (bool, optional): hide ClassificationComponent
hide_rocauc (bool, optional): hide RocAucComponent
hide_prauc (bool, optional): hide PrAucComponent
hide_liftcurve (bool, optional): hide LiftCurveComponent
hide_cumprecision (bool, optional): hide CumulativePrecisionComponent
pos_label ({int, str}, optional): initial pos label. Defaults to explainer.pos_label
bin_size (float, optional): bin_size for precision plot. Defaults to 0.1.
quantiles (int, optional): number of quantiles for precision plot. Defaults to 10.
cutoff (float, optional): initial cutoff. Defaults to 0.5.
"""
super().__init__(explainer, title, name)
def layout(self):
ModelDescription=pd.read_pickle(os.path.dirname(__file__) +"/../assets/ModelsDescription.pkl")
#MD=ModelDescription[(ModelDescription.id== "SVM")]
MD=ModelDescription[(ModelDescription.Cname== self.explainer.model.__class__.__name__)]
RecommendedConf=self.explainer.recommended_config[0][1]
rows=[]
table_header = [html.Thead(html.Tr([html.Th("Hyperparameter"), html.Th("Value")]))]
for key,val in RecommendedConf.items():
rows.append(html.Tr([html.Td(key), html.Td(str(val))]))
table_body = [html.Tbody(rows)]
classifier_name=MD.Cname
return html.Div([
dbc.Row([
dbc.Col([
dbc.Card([
dbc.CardHeader([html.H3(MD.Name, className="card-title")]),
dbc.CardBody([
html.Div([html.H6(MD.Cimport , style={"float": "left"}),html.Code(html.H6(html.B(MD.Cname))),
html.I(MD.Conceptual_desc.to_list()[0]),]),
html.Br(),
html.P(MD.details),
],style={"text-align": "justify"}),
dbc.CardFooter([dbc.CardLink("Learn more>>", href=MD.Ref.to_list()[0], style={"float": "right"})]),
]),
html.Br(),
html.Div(html.Img(src="./assets/AMLBID.png",style={"max-width":"60%", "height:":"auto"} ),style={ "margin-left": "200px"}),
], width=6),
dbc.Col([
#html.Br(),
dbc.Card([
dbc.CardHeader([html.H3("Recommended model configuration", className="card-title")]),
dbc.CardBody([dbc.Table(table_header + table_body, bordered=False)]),
dbc.CardFooter([
html.Div([
dbc.Button("Export Pipeline", id="example-button", color="info", className="mr-1", style={"float": "left"}),
dbc.Tooltip(f"Export recommended configuration implementation as a Python file",target="example-button",placement="right",
style={"width":"300px"}),
#html.Span(id="example-output", style={, style={"float": "right"}, style={"float": "right"}, style={"float": "right"}}),
dbc.Alert(["Configuration implementation exported ", html.B("successfully!")],color="success", id="alert-auto",is_open=False,duration=7000,
style={"float": "right","margin-bottom":"0px"}),
]),
])
]), html.Br(),
]),
], style=dict(marginTop=25, marginBottom=25) )
])
def component_callbacks(self, app):
@app.callback(
#Output("example-output", "children"), [Input("example-button", "n_clicks")],
Output("alert-auto", "is_open"),[Input("example-button", "n_clicks")],[State("alert-auto", "is_open")],
)
def toggle_alert(n, is_open):
if n:
generate_pipeline_file(self.explainer.model.__class__.__name__,self.explainer.recommended_config,'your dataset path')
return not is_open
return is_open
class Testcomposite(ExplainerComponent):
def __init__(self, explainer,title="Suggested configurations", name=None, **kwargs ):
super().__init__(explainer, title,name)
def layout(self):
DataComposite=self.explainer.recommended_config
ModelDescription=pd.read_pickle(os.path.dirname(__file__) +"/../assets/ModelsDescription.pkl")
def make_item(i,md,exp_acc,RecommendedConf,isHidden):
rows=[]
table_header = [html.Thead(html.Tr([html.Th("Hyperparameter"), html.Th("Value")]))]
for key,val in RecommendedConf.items():
rows.append(html.Tr([html.Td(key), html.Td(str(val))]))
table_body = [html.Tbody(rows)]
return make_hideable(dbc.Card([
html.Br(),
dbc.CardHeader([dbc.Form([dbc.FormGroup([
html.Tr([html.Th(dbc.Button(html.H5(f"Recommendation {i} : "+md.Cname),id=f"group-{i}-toggle",block=True,
style={"border": "none", "background-color": "inherit", "font-size": "16px",
"cursor": "pointer" , "color": "black", "width": "100%","align":"left",
"text-align":"left"}),style={"width":"600px"}),
html.Th(html.H5(f"Expected accuracy : {exp_acc} ") ,style={"width":"400px"}),
html.Th([dbc.Button("Export Pipeline" ,id=f"example-button{i}",color="info"),
dbc.Tooltip(f"Export recommended config as a Python file",target=f"example-button{i}",placement="top",
style={"width":"300px"}),
dbc.Toast("Recommended configuration implementation exported successfully!",id=f"alert-auto{i}",
header="Export pipeline",is_open=False,dismissable=True,icon="success",duration=4000,
style={"position": "fixed", "top": 10, "right": 10, "width": 350}),
], style={"width":"200px"})]),
]) ,],inline=True) ]) ,
dbc.Collapse([
dbc.CardBody([
dbc.Row([
dbc.Col([
dbc.Card([
dbc.CardHeader([html.H3(md.Name, className="card-title")]),
dbc.CardBody([
html.Div([html.H6(md.Cimport , style={"float": "left"}),html.Code(html.H6(html.B(md.Cname))),
html.I(md.Conceptual_desc.to_list()[0]),]),
html.Br(),
html.P(md.details),
],style={"text-align": "justify"}),
dbc.CardFooter([dbc.CardLink("Learn more>>", href=md.Ref.to_list()[0], target="_blank", style={"float": "right"})]),
]),
html.Br(),
], width=6),
dbc.Col([
#html.Br(),
dbc.Card([
dbc.CardHeader([html.H3("Recommended model configuration", className="card-title")]),
dbc.CardBody([
dbc.Table(table_header + table_body, bordered=False)
]),
]), html.Br(),
]),
], )
])
],id=f"collapse-{i}"),
]),hide=isHidden)
items=[html.Br()]
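        # The accordion callback below is wired to exactly three collapse and
        # toast ids, so three items are always built; slots beyond the number
        # of recommended configurations are rendered hidden.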
if len(DataComposite)==3:
for index, item in zip(range(len(DataComposite)), DataComposite):
md=ModelDescription[(ModelDescription.Cname== item[0][1].__class__.__name__)]
RecommendedConf=item[1]
acc=round(item[2],5)
items.append(make_item(index+1,md,acc,RecommendedConf,False))
if len(DataComposite)==2:
for index, item in zip(range(len(DataComposite)), DataComposite):
md=ModelDescription[(ModelDescription.Cname== item[0][1].__class__.__name__)]
RecommendedConf=item[1]
acc=round(item[2],5)
items.append(make_item(index+1,md,acc,RecommendedConf,False))
items.append(make_item(index+2,md,acc,RecommendedConf,True))
if len(DataComposite)==1:
for index, item in zip(range(len(DataComposite)), DataComposite):
md=ModelDescription[(ModelDescription.Cname== item[0][1].__class__.__name__)]
RecommendedConf=item[1]
acc=round(item[2],5)
items.append(make_item(index+1,md,acc,RecommendedConf,False))
items.append(make_item(index+2,md,acc,RecommendedConf,True))
items.append(make_item(index+3,md,acc,RecommendedConf,True))
return html.Div(items,
className="accordion", style={"margin-left":"100px","margin-right":"100px"})
#,html.Br(), make_item(2),html.Br(), make_item(3)
def component_callbacks(self, app):
DataComposite=self.explainer.recommended_config
@app.callback(
[Output(f"collapse-1", "is_open"),Output(f"collapse-2", "is_open"),Output(f"collapse-3", "is_open"),
Output(f"alert-auto1", "is_open"),Output(f"alert-auto2", "is_open"),Output(f"alert-auto3", "is_open")],
[Input(f"group-1-toggle", "n_clicks"),Input(f"group-2-toggle", "n_clicks"),Input(f"group-3-toggle", "n_clicks"),
Input(f"example-button1", "n_clicks"),Input(f"example-button2", "n_clicks"),Input(f"example-button3", "n_clicks")],
[State(f"collapse-1", "is_open"),State(f"collapse-2", "is_open"),State(f"collapse-3", "is_open"),
State("alert-auto1", "is_open"),State("alert-auto2", "is_open"),State("alert-auto3", "is_open")],
)
def toggle_accordion(n1, n2, n3,n4,n5,n6, is_open1, is_open2, is_open3, is_open4, is_open5, is_open6):
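            # A single callback drives the whole accordion, since Dash allows
            # only one callback per Output: every collapse and export toast
            # is toggled here depending on which button fired.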
ctx = dash.callback_context
if not ctx.triggered:
return False, False, False,False, False, False
else:
button_id = ctx.triggered[0]["prop_id"].split(".")[0]
if button_id == "group-1-toggle" and n1:
return not is_open1, False, False,False, False, False
elif button_id == "group-2-toggle" and n2:
return False, not is_open2, False,False, False, False
elif button_id == "group-3-toggle" and n3:
return False, False, not is_open3,False, False, False
elif button_id == "example-button1" and n4:
item=DataComposite[0]
generate_pipeline_file(item[0][1].__class__.__name__,item[1],'your dataset path')
return False, False, False,not is_open4,False, False
elif button_id == "example-button2" and n5:
item=DataComposite[1]
generate_pipeline_file(item[0][1].__class__.__name__,item[1],'your dataset path')
return False, False, False,False,not is_open5,False
elif button_id == "example-button3" and n6:
item=DataComposite[2]
generate_pipeline_file(item[0][1].__class__.__name__,item[1],'your dataset path')
return False, False,False, False, False, not is_open6
return False, False, False,False, False, False
class RefinementComposite(ExplainerComponent):
def __init__(self, explainer, title="Hyperparameters importance", name=None,
hide_title=True, hide_selector=True,
hide_globalcutoff=False,
hide_modelsummary=False, hide_confusionmatrix=False,
hide_precision=False, hide_classification=False,
hide_rocauc=False, hide_prauc=False,
hide_liftcurve=False, hide_cumprecision=False,
pos_label=None,
bin_size=0.1, quantiles=10, cutoff=0.5, **kwargs):
super().__init__(explainer, title, name)
def layout(self):
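        # Per-classifier counts used to slice the precomputed fANOVA table;
        # the first entry is (assumed to be) the number of tuned
        # hyperparameters, the second the number of interaction terms.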
dic={
"AdaBoostClassifier": [4,7],
"GradientBoostingClassifier": [7,11],
"ExtraTreesClassifier": [5,12],
"DecisionTreeClassifier": [4,7],
"RandomForestClassifier": [5,12],
"SVC": [6,11]
}
fAnova_data = pd.read_csv(os.path.dirname(__file__)+'/../assets/ANOVA_FINAL.csv',sep=',')
NN=self.explainer.recommended_config[0][-1]
CN=self.explainer.model.__class__.__name__
RS=fAnova_data[(fAnova_data.dataset==NN) & (fAnova_data.algorithm =="RandomForest")].to_numpy()
HI=pd.DataFrame(RS[:dic[CN][0]]).sort_values(by=[2], ascending=False).to_numpy()
hyper_importance_table_header = [html.Thead(html.Tr([html.Th("Hyperparameter"), html.Th("Importance")]))]
rows=[]
for i in range(dic[CN][0]):
rows.append(html.Tr([html.Td(HI[i][1]), html.Td(dbc.Progress(value=HI[i][2]*100, color="00005E", className="mb-3"))]))
hyper_importance_table_body = [html.Tbody(rows)]
HCI=pd.DataFrame(RS[dic[CN][0]:2*dic[CN][0]]).sort_values(by=[2], ascending=False).to_numpy()
rows=[]
hyper_corr_importance_table_header = [html.Thead(html.Tr([html.Th("Hyperparameters"), html.Th("Dependence")]))]
for i in range(dic[CN][0]):
rows.append(html.Tr([html.Td(HCI[i][1]), html.Td(dbc.Progress(value=HCI[i][2]*2500, color="00005E", className="mb-3"))]))
hyper_corr_importance_table_body = [html.Tbody(rows)]
return html.Div([
dbc.Row([
dbc.Col([ html.Br(),
dbc.Card(
dbc.CardBody([
html.H3("Hyperparameters importance"),
dbc.Table(hyper_importance_table_header + hyper_importance_table_body, bordered=False),
#className="p-5"
#self.shap_dependence.layout()
])), html.Br(),
]),
dbc.Col([ html.Br(),
dbc.Card(
dbc.CardBody([
html.H3("Hyperparameters correlation"),
dbc.Table(hyper_corr_importance_table_header + hyper_corr_importance_table_body, bordered=False),
#className="p-5"
#self.shap_dependence.layout()
])), html.Br(),
]),
])
        ])

# ==== End of AMLBID/Explainer/dashboard_components/composites.py ====
import numpy as np
from scipy.stats import norm
__author__ = 'bejar'
class SAX:
"""
Sax representation of the time series
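
    Example (a minimal usage sketch; ``series`` stands for any
    one dimensional numpy array):

        >>> sax = SAX(window_length=100, step=10, word_length=8, voc_size=4)
        >>> words = sax.transform(series)   # shape: (n_windows, 8)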
"""
winlen = None
step = None
wrdlen = None
voc = None
intervals = None
def __init__(self, window_length=100, step=1, word_length=10, voc_size=3):
self.winlen = window_length
self.step = step
self.wrdlen = word_length
self.voc = voc_size
self.intervals = norm.ppf(np.arange(self.voc)/float(self.voc))
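        # Breakpoints that split a standard normal into voc equiprobable
        # regions; intervals[0] is -inf, which guarantees that the lookup
        # loop in _SAX_function always terminates.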
def transform(self, X):
"""
Computes the SAX representation for a vector of data
The data is normalized before transformation
Beware: If length is not a divisor of the vector size, some data
points at the end will be ignored
The intervals for the discretization are computed on every call
:param length: Length of the wore
:param voc: Length of the vocabulary
:param intervals: list with the breakpoints of the discretization
if the parameter does not exist the intervals are
computed, but this is inefficient
:return: a vector with length values in the range [-voc//2,voc//2]
"""
nwin = ((X.shape[0]-self.winlen)//self.step) + 1
res = np.zeros((nwin, self.wrdlen))
for w in range(nwin):
chunk = X[w*self.step: (w*self.step) + self.winlen]
res[w] = self._SAX_function(chunk, self.wrdlen, self.voc, self.intervals)
return res
@staticmethod
def _SAX_function(data, length, voc, intervals):
"""
Computes the SAX representation for a vector of data
The data is normalized before transformation
Beware: If length is not a divisor of the vector size, some data
points at the end will be ignored
        :param data: one dimensional numpy array with the window values
        :param length: Length of the word
        :param voc: Length of the vocabulary
        :param intervals: list with the breakpoints of the discretization
:return: a vector with length values in the range [-voc//2,voc//2]
"""
index = np.zeros(length)
        data = data - data.mean(0)  # work on a copy so the caller's array is not mutated
data = np.nan_to_num(data / data.std(0))
step = int(data.shape[0] / length)
for i in range(length):
mr = np.mean(data[i*step:(i*step)+step])
j = voc - 1
while mr < intervals[j]:
j -= 1
index[i] = j - int(voc/2)
        return index

# ==== End of amltlearn/time_series/discretization/SAX.py ====
__author__ = 'bejar'
import numpy as np
from sklearn.neighbors import kneighbors_graph, NearestNeighbors
from operator import itemgetter
class LaplacianScore():
"""
Laplacian Score algorithm
Parameters:
n_neighbors: int
Number of neighbors to compute the similarity matrix
bandwidth: float
Bandwidth for the gaussian similarity kernel
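
    Example (a minimal usage sketch; ``X`` stands for any
    [n_samples, n_features] numpy array):

        >>> ls = LaplacianScore(n_neighbors=5, bandwidth=0.01, k=2)
        >>> X_reduced = ls.fit_transform(X)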
"""
scores_ = None
def __init__(self, n_neighbors=5, bandwidth=0.01, k=None):
"""
Initial values of the parameters
:param int n_neighbors: Number of neighbors for the spectral matrix
:param float bandwidth: Bandwidth for the gaussian kernel
:param int k: number of features to select
"""
self._n_neighbors = n_neighbors
self._bandwidth = bandwidth
self._k = k
def fit(self, X):
"""
Computes the laplacian scores for the dataset
:param matrix X: is a [n_examples, n_attributes] numpy array
"""
self._fit_process(X)
return self
def _best_k_scores(self, k=5):
"""
returns the indices of the best k attributes according to the score
:param k:
:return:
"""
if self.scores_ is None:
raise Exception('Laplacian Score: Not fitted')
else:
l = list(enumerate(self.scores_))
            # smaller Laplacian scores indicate more locality-preserving,
            # i.e. better, features (He et al. 2005)
            l = sorted(l, key=itemgetter(1))
return [l[i][0] for i in range(k)]
def fit_transform(self, X):
"""
Selects the features and returns the dataset with only the k best ones
:param matrix X: dataset
:return:
"""
self._fit_process(X)
l = list(enumerate(self.scores_))
        # keep the k features with the smallest (best) Laplacian scores
        l = sorted(l, key=itemgetter(1))
lsel = [l[i][0] for i in range(self._k)]
return X[:, lsel]
# Todo: implementation only with sparse matrices
def _fit_process(self, X):
"""
Computes the Laplacian score for the attributes
:param X:
:return:
"""
self.scores_ = np.zeros(X.shape[1])
        # Similarity matrix: Gaussian kernel over the k-nearest-neighbor
        # graph, keeping zeros for the non-neighbor pairs
        dist = kneighbors_graph(X, n_neighbors=self._n_neighbors, mode='distance')
        dist = dist.toarray()
        edges = dist > 0
        S = np.zeros_like(dist)
        S[edges] = np.exp(-(dist[edges] ** 2) / self._bandwidth)
ones = np.ones(X.shape[0])
D = np.diag(np.dot(S, ones))
L = D - S
qt = D.sum()
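        # For every feature r the score is
        #   (Fr_hat' L Fr_hat) / (Fr_hat' D Fr_hat)
        # where Fr_hat is the feature vector centered by its
        # degree-weighted mean (He et al. 2005)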
for at in range(X.shape[1]):
Fr = X[:, at]
Fr_hat = Fr - np.dot(np.dot(Fr, D) / qt, ones)
score1 = np.dot(np.dot(Fr_hat, L), Fr_hat)
score2 = np.dot(np.dot(Fr_hat, D), Fr_hat)
            self.scores_[at] = score1 / score2

# ==== End of amltlearn/feature_selection/unsupervised/LaplacianScore.py ====
# Author: Nico de Vos <[email protected]>
# License: MIT
from collections import defaultdict
import numpy as np
# The k-modes helper functions used below live at module level in KModes
from .KModes import (KModes, matching_dissim, init_huang, init_cao,
                     get_max_value_key, move_point_cat)
def euclidean_dissim(a, b):
"""Euclidean distance dissimilarity function"""
return np.sum((a - b) ** 2, axis=1)
def move_point_num(point, ipoint, to_clust, from_clust,
cl_attr_sum, membership):
"""Move point between clusters, numerical attributes."""
membership[to_clust, ipoint] = 1
membership[from_clust, ipoint] = 0
# Update sum of attributes in cluster.
for iattr, curattr in enumerate(point):
cl_attr_sum[to_clust][iattr] += curattr
cl_attr_sum[from_clust][iattr] -= curattr
return cl_attr_sum, membership
def _labels_cost(Xnum, Xcat, centroids, gamma):
"""Calculate labels and cost function given a matrix of points and
a list of centroids for the k-prototypes algorithm.
"""
npoints = Xnum.shape[0]
cost = 0.
labels = np.empty(npoints, dtype='int64')
for ipoint in range(npoints):
        # Numerical cost = sum of squared Euclidean distances
        num_costs = euclidean_dissim(centroids[0], Xnum[ipoint])
        cat_costs = matching_dissim(centroids[1], Xcat[ipoint])
# Gamma relates the categorical cost to the numerical cost.
tot_costs = num_costs + gamma * cat_costs
clust = np.argmin(tot_costs)
labels[ipoint] = clust
cost += tot_costs[clust]
return labels, cost
def _k_prototypes_iter(Xnum, Xcat, centroids, cl_attr_sum, cl_attr_freq,
membership, gamma):
"""Single iteration of the k-prototypes algorithm"""
moves = 0
for ipoint in range(Xnum.shape[0]):
clust = np.argmin(
euclidean_dissim(centroids[0], Xnum[ipoint]) +
            gamma * matching_dissim(centroids[1], Xcat[ipoint]))
if membership[clust, ipoint]:
# Point is already in its right place.
continue
# Move point, and update old/new cluster frequencies and centroids.
moves += 1
old_clust = np.argwhere(membership[:, ipoint])[0][0]
cl_attr_sum, membership = move_point_num(
Xnum[ipoint], ipoint, clust, old_clust, cl_attr_sum,
membership)
        cl_attr_freq, membership = move_point_cat(
Xcat[ipoint], ipoint, clust, old_clust, cl_attr_freq,
membership)
# Update new and old centroids by choosing mean for numerical
# and mode for categorical attributes.
for iattr in range(len(Xnum[ipoint])):
for curc in (clust, old_clust):
if sum(membership[curc, :]):
centroids[0][curc, iattr] = \
cl_attr_sum[curc, iattr] / sum(membership[curc, :])
else:
centroids[0][curc, iattr] = 0.
for iattr in range(len(Xcat[ipoint])):
for curc in (clust, old_clust):
centroids[1][curc, iattr] = \
                    get_max_value_key(cl_attr_freq[curc][iattr])
# In case of an empty cluster, reinitialize with a random point
# from largest cluster.
if sum(membership[old_clust, :]) == 0:
from_clust = membership.sum(axis=1).argmax()
choices = \
[ii for ii, ch in enumerate(membership[from_clust, :]) if ch]
rindx = np.random.choice(choices)
            cl_attr_sum, membership = move_point_num(
                Xnum[rindx], rindx, old_clust, from_clust, cl_attr_sum,
                membership)
            cl_attr_freq, membership = move_point_cat(
                Xcat[rindx], rindx, old_clust, from_clust, cl_attr_freq,
                membership)
return centroids, moves
def k_prototypes(X, n_clusters, gamma, init, n_init, max_iter, verbose):
"""k-prototypes algorithm"""
assert len(X) == 2, "X should be a list of Xnum and Xcat arrays"
# List where [0] = numerical part of centroid and
# [1] = categorical part. Same for centroids.
Xnum, Xcat = X
# Convert to numpy arrays, if needed.
Xnum = np.asanyarray(Xnum)
Xcat = np.asanyarray(Xcat)
nnumpoints, nnumattrs = Xnum.shape
ncatpoints, ncatattrs = Xcat.shape
assert nnumpoints == ncatpoints,\
"Uneven number of numerical and categorical points"
npoints = nnumpoints
assert n_clusters < npoints, "More clusters than data points?"
# Estimate a good value for gamma, which determines the weighing of
# categorical values in clusters (see Huang [1997]).
if gamma is None:
gamma = 0.5 * Xnum.std()
all_centroids = []
all_labels = []
all_costs = []
for init_no in range(n_init):
# For numerical part of initialization, we don't have a guarantee
# that there is not an empty cluster, so we need to retry until
# there is none.
while True:
# _____ INIT _____
if verbose:
print("Init: initializing centroids")
if init == 'Huang':
                centroids = init_huang(Xcat, n_clusters)
elif init == 'Cao':
                centroids = init_cao(Xcat, n_clusters)
elif init == 'random':
seeds = np.random.choice(range(npoints), n_clusters)
centroids = Xcat[seeds]
elif hasattr(init, '__array__'):
centroids = init
else:
raise NotImplementedError
# Numerical is initialized by drawing from normal distribution,
# categorical following the k-modes methods.
meanX = np.mean(Xnum, axis=0)
stdX = np.std(Xnum, axis=0)
centroids = [meanX + np.random.randn(n_clusters, nnumattrs) * stdX,
centroids]
if verbose:
print("Init: initializing clusters")
membership = np.zeros((n_clusters, npoints), dtype='int64')
# Keep track of the sum of attribute values per cluster so that we
# can do k-means on the numerical attributes.
cl_attr_sum = np.zeros((n_clusters, nnumattrs), dtype='float')
# cl_attr_freq is a list of lists with dictionaries that contain
# the frequencies of values per cluster and attribute.
cl_attr_freq = [[defaultdict(int) for _ in range(ncatattrs)]
for _ in range(n_clusters)]
for ipoint in range(npoints):
# Initial assignment to clusters
clust = np.argmin(
euclidean_dissim(centroids[0], Xnum[ipoint]) +
                    gamma * matching_dissim(centroids[1], Xcat[ipoint]))
membership[clust, ipoint] = 1
# Count attribute values per cluster.
for iattr, curattr in enumerate(Xnum[ipoint]):
cl_attr_sum[clust, iattr] += curattr
for iattr, curattr in enumerate(Xcat[ipoint]):
cl_attr_freq[clust][iattr][curattr] += 1
# If no empty clusters, then consider initialization finalized.
if membership.sum(axis=1).min() > 0:
break
# Perform an initial centroid update.
for ik in range(n_clusters):
for iattr in range(nnumattrs):
centroids[0][ik, iattr] = \
cl_attr_sum[ik, iattr] / sum(membership[ik, :])
for iattr in range(ncatattrs):
centroids[1][ik, iattr] = \
                        get_max_value_key(cl_attr_freq[ik][iattr])
# _____ ITERATION _____
if verbose:
print("Starting iterations...")
itr = 0
converged = False
        cost = np.inf
while itr <= max_iter and not converged:
itr += 1
centroids, moves = _k_prototypes_iter(
Xnum, Xcat, centroids, cl_attr_sum, cl_attr_freq,
membership, gamma)
# All points seen in this iteration
labels, ncost = \
_labels_cost(Xnum, Xcat, centroids, gamma)
converged = (moves == 0) or (ncost >= cost)
cost = ncost
if verbose:
print("Run: {}, iteration: {}/{}, moves: {}, ncost: {}"
.format(init_no + 1, itr, max_iter, moves, ncost))
# Store results of current run.
all_centroids.append(centroids)
all_labels.append(labels)
all_costs.append(cost)
best = np.argmin(all_costs)
if n_init > 1 and verbose:
print("Best run was number {}".format(best + 1))
# Note: return gamma in case it was automatically determined.
return all_centroids[best], all_labels[best], all_costs[best], gamma
class KPrototypes(KModes):
"""k-protoypes clustering algorithm for mixed numerical/categorical data.
Parameters
-----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
gamma : float, default: None
Weighing factor that determines relative importance of numerical vs.
categorical attributes (see discussion in Huang [1997]). By default,
automatically calculated from data.
max_iter : int, default: 300
Maximum number of iterations of the k-modes algorithm for a
single run.
n_init : int, default: 10
Number of time the k-modes algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of cost.
init : {'Huang', 'Cao', 'random' or an ndarray}
Method for initialization:
'Huang': Method in Huang [1997, 1998]
'Cao': Method in Cao et al. [2009]
'random': choose k observations (rows) at random from data for
the initial centroids.
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centroids.
verbose : boolean, optional
Verbosity mode.
Attributes
----------
cluster_centroids_ : array, [n_clusters, n_features]
Categories of cluster centroids
labels_ :
Labels of each point
cost_ : float
Clustering cost, defined as the sum distance of all points to
their respective cluster centroids.
Notes
-----
See:
Huang, Z.: Extensions to the k-modes algorithm for clustering large
data sets with categorical values, Data Mining and Knowledge
Discovery 2(3), 1998.
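
    Example (a minimal usage sketch; ``Xnum`` and ``Xcat`` stand for the
    numerical and categorical parts of the same observations):

        >>> kp = KPrototypes(n_clusters=4, init='Cao')
        >>> kp.fit([Xnum, Xcat])
        >>> labels = kp.predict([Xnum, Xcat])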
"""
def __init__(self, n_clusters=8, gamma=None, init='Huang', n_init=10,
max_iter=100, verbose=0):
super(KPrototypes, self).__init__(n_clusters, init, n_init, max_iter,
verbose)
self.gamma = gamma
def fit(self, X):
"""Compute k-prototypes clustering.
Parameters
----------
X : list of array-like, shape=[[n_num_samples, n_features],
[n_cat_samples, n_features]]
"""
# If self.gamma is None, gamma will be automatically determined from
# the data. The function below returns its value.
self.cluster_centroids_, self.labels_, self.cost_, self.gamma = \
k_prototypes(X, self.n_clusters, self.gamma, self.init,
self.n_init, self.max_iter, self.verbose)
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : list of array-like, shape=[[n_num_samples, n_features],
[n_cat_samples, n_features]]
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
assert hasattr(self, 'cluster_centroids_'), "Model not yet fitted."
return _labels_cost(X[0], X[1], self.cluster_centroids_,
                            self.gamma)[0]

# ==== End of amltlearn/cluster/KPrototypes.py ====
import numpy as np
from sklearn.base import BaseEstimator, ClusterMixin
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.utils import check_random_state
class KernelKMeans(BaseEstimator, ClusterMixin):
"""
Kernel K-means
Reference
---------
Kernel k-means, Spectral Clustering and Normalized Cuts.
Inderjit S. Dhillon, Yuqiang Guan, Brian Kulis.
KDD 2004.
"""
def __init__(self, n_clusters=3, max_iter=50, tol=1e-3, random_state=None,
kernel="linear", gamma=None, degree=3, coef0=1,
kernel_params=None, verbose=0):
self.n_clusters = n_clusters
self.max_iter = max_iter
self.tol = tol
self.random_state = random_state
self.kernel = kernel
self.gamma = gamma
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
self.verbose = verbose
@property
def _pairwise(self):
return self.kernel == "precomputed"
def _get_kernel(self, X, Y=None):
if callable(self.kernel):
params = self.kernel_params or {}
else:
params = {"gamma": self.gamma,
"degree": self.degree,
"coef0": self.coef0}
return pairwise_kernels(X, Y, metric=self.kernel,
filter_params=True, **params)
def fit(self, X, y=None, sample_weight=None):
n_samples = X.shape[0]
K = self._get_kernel(X)
        sw = sample_weight if sample_weight is not None else np.ones(n_samples)
self.sample_weight_ = sw
rs = check_random_state(self.random_state)
self.labels_ = rs.randint(self.n_clusters, size=n_samples)
dist = np.zeros((n_samples, self.n_clusters))
self.within_distances_ = np.zeros(self.n_clusters)
        for it in range(self.max_iter):
dist.fill(0)
self._compute_dist(K, dist, self.within_distances_,
update_within=True)
labels_old = self.labels_
self.labels_ = dist.argmin(axis=1)
# Compute the number of samples whose cluster did not change
# since last iteration.
n_same = np.sum((self.labels_ - labels_old) == 0)
if 1 - float(n_same) / n_samples < self.tol:
if self.verbose:
print("Converged at iteration", it + 1)
break
self.X_fit_ = X
return self
def _compute_dist(self, K, dist, within_distances, update_within):
"""Compute a n_samples x n_clusters distance matrix using the
kernel trick."""
sw = self.sample_weight_
        for j in range(self.n_clusters):
mask = self.labels_ == j
if np.sum(mask) == 0:
raise ValueError("Empty cluster found, try smaller n_cluster.")
denom = sw[mask].sum()
denomsq = denom * denom
if update_within:
KK = K[mask][:, mask] # K[mask, mask] does not work.
dist_j = np.sum(np.outer(sw[mask], sw[mask]) * KK / denomsq)
within_distances[j] = dist_j
dist[:, j] += dist_j
else:
dist[:, j] += within_distances[j]
dist[:, j] -= 2 * np.sum(sw[mask] * K[:, mask], axis=1) / denom
def predict(self, X):
K = self._get_kernel(X, self.X_fit_)
n_samples = X.shape[0]
dist = np.zeros((n_samples, self.n_clusters))
self._compute_dist(K, dist, self.within_distances_,
update_within=False)
return dist.argmin(axis=1)
if __name__ == '__main__':
from sklearn.datasets import make_blobs
X, y = make_blobs(n_samples=1000, centers=5, random_state=0)
km = KernelKMeans(n_clusters=3, max_iter=100, random_state=0, verbose=1)
    print(km.fit_predict(X[:10]))
print(km.predict(X[:10])) | AMLT-learn | /AMLT-learn-0.2.9.tar.gz/AMLT-learn-0.2.9/amltlearn/cluster/KernelKMeans.py | KernelKMeans.py |
# Author: Nico de Vos <[email protected]>
# License: MIT
from collections import defaultdict
import numpy as np
def get_max_value_key(dic):
"""Fast method to get key for maximum value in dict."""
v = list(dic.values())
k = list(dic.keys())
return k[v.index(max(v))]
def matching_dissim(a, b):
"""Simple matching dissimilarity function"""
return np.sum(a != b, axis=1)
def init_huang(X, n_clusters):
"""Initialize n_clusters according to method by Huang [1997]."""
nattrs = X.shape[1]
centroids = np.empty((n_clusters, nattrs), dtype='object')
# determine frequencies of attributes
for iattr in range(nattrs):
freq = defaultdict(int)
for curattr in X[:, iattr]:
freq[curattr] += 1
# Sample centroids using the probabilities of attributes.
# (I assume that's what's meant in the Huang [1998] paper; it works,
# at least)
# Note: sampling using population in static list with as many choices
# as frequency counts. Since the counts are small integers,
# memory consumption is low.
choices = [chc for chc, wght in freq.items() for _ in range(wght)]
centroids[:, iattr] = np.random.choice(choices, n_clusters)
# The previously chosen centroids could result in empty clusters,
# so set centroid to closest point in X.
for ik in range(n_clusters):
ndx = np.argsort(matching_dissim(X, centroids[ik]))
# We want the centroid to be unique.
while np.all(X[ndx[0]] == centroids, axis=1).any():
ndx = np.delete(ndx, 0)
centroids[ik] = X[ndx[0]]
return centroids
def init_cao(X, n_clusters):
"""Initialize n_clusters according to method by Cao et al. [2009].
Note: O(N * attr * n_clusters**2), so watch out with large n_clusters
"""
npoints, nattrs = X.shape
centroids = np.empty((n_clusters, nattrs), dtype='object')
    # Method is based on determining the density of points.
dens = np.zeros(npoints)
for iattr in range(nattrs):
freq = defaultdict(int)
for val in X[:, iattr]:
freq[val] += 1
for ipoint in range(npoints):
dens[ipoint] += freq[X[ipoint, iattr]] / float(nattrs)
dens /= npoints
# Choose initial centroids based on distance and density.
centroids[0] = X[np.argmax(dens)]
if n_clusters > 1:
# For the remaining centroids, choose maximum dens * dissim to the
# (already assigned) centroid with the lowest dens * dissim.
for ik in range(1, n_clusters):
dd = np.empty((ik, npoints))
for ikk in range(ik):
dd[ikk] = matching_dissim(X, centroids[ikk]) * dens
centroids[ik] = X[np.argmax(np.min(dd, axis=0))]
return centroids
def move_point_cat(point, ipoint, to_clust, from_clust,
cl_attr_freq, membership):
"""Move point between clusters, categorical attributes."""
membership[to_clust, ipoint] = 1
membership[from_clust, ipoint] = 0
# Update frequencies of attributes in cluster.
for iattr, curattr in enumerate(point):
cl_attr_freq[to_clust][iattr][curattr] += 1
cl_attr_freq[from_clust][iattr][curattr] -= 1
return cl_attr_freq, membership
def _labels_cost(X, centroids):
"""Calculate labels and cost function given a matrix of points and
a list of centroids for the k-modes algorithm.
"""
npoints = X.shape[0]
cost = 0.
labels = np.empty(npoints, dtype='int64')
for ipoint, curpoint in enumerate(X):
diss = matching_dissim(centroids, curpoint)
clust = np.argmin(diss)
labels[ipoint] = clust
cost += diss[clust]
return labels, cost
def _k_modes_iter(X, centroids, cl_attr_freq, membership):
"""Single iteration of k-modes clustering algorithm"""
moves = 0
for ipoint, curpoint in enumerate(X):
clust = np.argmin(matching_dissim(centroids, curpoint))
if membership[clust, ipoint]:
# Point is already in its right place.
continue
# Move point, and update old/new cluster frequencies and centroids.
moves += 1
old_clust = np.argwhere(membership[:, ipoint])[0][0]
cl_attr_freq, membership = move_point_cat(
curpoint, ipoint, clust, old_clust, cl_attr_freq, membership)
# Update new and old centroids by choosing mode of attribute.
for iattr in range(len(curpoint)):
for curc in (clust, old_clust):
centroids[curc, iattr] = \
get_max_value_key(cl_attr_freq[curc][iattr])
# In case of an empty cluster, reinitialize with a random point
# from the largest cluster.
if sum(membership[old_clust, :]) == 0:
from_clust = membership.sum(axis=1).argmax()
choices = \
[ii for ii, ch in enumerate(membership[from_clust, :]) if ch]
rindx = np.random.choice(choices)
cl_attr_freq, membership = move_point_cat(
X[rindx], rindx, old_clust, from_clust, cl_attr_freq,
membership)
return centroids, moves
def k_modes(X, n_clusters, init, n_init, max_iter, verbose):
"""k-modes algorithm"""
# Convert to numpy array, if needed.
X = np.asanyarray(X)
npoints, nattrs = X.shape
assert n_clusters < npoints, "More clusters than data points?"
all_centroids = []
all_labels = []
all_costs = []
for init_no in range(n_init):
# _____ INIT _____
if verbose:
print("Init: initializing centroids")
if init == 'Huang':
centroids = init_huang(X, n_clusters)
elif init == 'Cao':
centroids = init_cao(X, n_clusters)
elif init == 'random':
            seeds = np.random.choice(range(npoints), n_clusters, replace=False)
centroids = X[seeds]
elif hasattr(init, '__array__'):
centroids = init
else:
raise NotImplementedError
if verbose:
print("Init: initializing clusters")
membership = np.zeros((n_clusters, npoints), dtype='int64')
# cl_attr_freq is a list of lists with dictionaries that contain the
# frequencies of values per cluster and attribute.
cl_attr_freq = [[defaultdict(int) for _ in range(nattrs)]
for _ in range(n_clusters)]
for ipoint, curpoint in enumerate(X):
# Initial assignment to clusters
clust = np.argmin(matching_dissim(centroids, curpoint))
membership[clust, ipoint] = 1
# Count attribute values per cluster.
for iattr, curattr in enumerate(curpoint):
cl_attr_freq[clust][iattr][curattr] += 1
# Perform an initial centroid update.
for ik in range(n_clusters):
for iattr in range(nattrs):
centroids[ik, iattr] = get_max_value_key(
cl_attr_freq[ik][iattr])
# _____ ITERATION _____
if verbose:
print("Starting iterations...")
itr = 0
converged = False
cost = np.Inf
while itr <= max_iter and not converged:
itr += 1
centroids, moves = \
_k_modes_iter(X, centroids, cl_attr_freq, membership)
# All points seen in this iteration
labels, ncost = _labels_cost(X, centroids)
converged = (moves == 0) or (ncost >= cost)
cost = ncost
if verbose:
print("Run {}, iteration: {}/{}, moves: {}, cost: {}"
.format(init_no + 1, itr, max_iter, moves, cost))
# Store result of current run.
all_centroids.append(centroids)
all_labels.append(labels)
all_costs.append(cost)
best = np.argmin(all_costs)
if n_init > 1 and verbose:
print("Best run was number {}".format(best + 1))
return all_centroids[best], all_labels[best], all_costs[best]
class KModes(object):
"""k-modes clustering algorithm for categorical data.
Parameters
-----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
max_iter : int, default: 300
Maximum number of iterations of the k-modes algorithm for a
single run.
n_init : int, default: 10
Number of time the k-modes algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of cost.
init : {'Huang', 'Cao', 'random' or an ndarray}
Method for initialization:
'Huang': Method in Huang [1997, 1998]
'Cao': Method in Cao et al. [2009]
'random': choose k observations (rows) at random from data for
the initial centroids.
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centroids.
verbose : boolean, optional
Verbosity mode.
Attributes
----------
cluster_centroids_ : array, [n_clusters, n_features]
Categories of cluster centroids
labels_ :
Labels of each point
cost_ : float
Clustering cost, defined as the sum distance of all points to
their respective cluster centroids.
Notes
-----
See:
Huang, Z.: Extensions to the k-modes algorithm for clustering large
data sets with categorical values, Data Mining and Knowledge
Discovery 2(3), 1998.
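    Examples
    --------
    A minimal usage sketch (hypothetical, not part of the original source):

    >>> import numpy as np
    >>> X = np.array([['a', 'x'], ['a', 'y'], ['b', 'x'], ['b', 'y']])
    >>> KModes(n_clusters=2, init='Huang', n_init=2).fit_predict(X).shape
    (4,)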
"""
def __init__(self, n_clusters=8, init='Cao', n_init=10, max_iter=100,
verbose=0):
if hasattr(init, '__array__'):
n_clusters = init.shape[0]
init = np.asarray(init, dtype=np.float64)
self.n_clusters = n_clusters
self.init = init
self.n_init = n_init
self.verbose = verbose
if (self.init == 'Cao' or hasattr(self.init, '__array__')) and \
self.n_init > 1:
if self.verbose:
print("Initialization method and algorithm are deterministic. "
"Setting n_init to 1.")
self.n_init = 1
self.max_iter = max_iter
def fit(self, X):
"""Compute k-modes clustering.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
"""
self.cluster_centroids_, self.labels_, self.cost_ = \
k_modes(X, self.n_clusters, self.init, self.n_init,
self.max_iter, self.verbose)
return self
def fit_predict(self, X, y=None):
"""Compute cluster centroids and predict cluster index for each sample.
Convenience method; equivalent to calling fit(X) followed by
predict(X).
"""
return self.fit(X).labels_
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
assert hasattr(self, 'cluster_centroids_'), "Model not yet fitted."
return _labels_cost(X, self.cluster_centroids_)[0] | AMLT-learn | /AMLT-learn-0.2.9.tar.gz/AMLT-learn-0.2.9/amltlearn/cluster/KModes.py | KModes.py |
__author__ = 'bejar'
import numpy as np
from sklearn.base import BaseEstimator, ClusterMixin, TransformerMixin
from sklearn.metrics.pairwise import euclidean_distances
class Leader(BaseEstimator, ClusterMixin, TransformerMixin):
"""Leader Algorithm Clustering
    Parameters:
    radius: float
        Clustering radius for assigning examples to a cluster
"""
def __init__(self, radius):
self.radius = radius
self.cluster_centers_ = None
self.labels_ = None
self.cluster_sizes_ = None
def num_clusters(self):
return self.cluster_centers_.shape[0]
def fit(self, X):
"""
Clusters the examples
:param X:
:return:
"""
self.cluster_centers_, self.labels_, self.cluster_sizes_ = self._fit_process(X)
return self
def predict(self, X):
"""
Returns the nearest cluster for a data matrix
@param X:
@return:
"""
clasif = []
for i in range(X.shape[0]):
ncl, mdist = self._find_nearest_cluster(X[i].reshape(1, -1), self.cluster_centers_)
if mdist <= self.radius:
clasif.append(ncl)
else:
clasif.append(-1)
return clasif
def _fit_process(self, X):
"""
Clusters incrementally the examples
:param X:
:return:
"""
assignments = []
scenters = np.zeros((1, X.shape[1]))
centers = np.zeros((1, X.shape[1]))
# Initialize with the first example
scenters[0] = X[0]
centers[0] = X[0]
assignments.append([0])
csizes = np.array([1])
# Cluster the rest of examples
for i in range(1, X.shape[0]):
ncl, mdist = self._find_nearest_cluster(X[i].reshape(1, -1), centers)
# if distance is less than radius, introduce example in nearest class
if mdist <= self.radius:
scenters[ncl] += X[i]
csizes[ncl] += 1
centers[ncl] = scenters[ncl] / csizes[ncl]
assignments[ncl].append(i)
else: # Create a new cluster
scenters = np.append(scenters, np.array([X[i]]), 0)
centers = np.append(centers, np.array([X[i]]), 0)
csizes = np.append(csizes, [1], 0)
assignments.append([i])
labels = np.zeros(X.shape[0])
for l, ej in enumerate(assignments):
for e in ej:
labels[e] = l
return centers, labels, csizes
@staticmethod
def _find_nearest_cluster(examp, centers):
"""
Finds the nearest cluster for an example
:param examp:
:param centers:
:return:
"""
dist = euclidean_distances(centers, examp)
pmin = np.argmin(dist)
vmin = np.min(dist)
return pmin, vmin
if __name__ == '__main__':
from sklearn.datasets import make_blobs, load_iris, make_circles
X, y_data = make_circles(n_samples=1000, noise=0.5, random_state=4, factor=0.5)
ld = Leader(radius=.01)
ld.fit(X)
print(ld.predict(np.array([[0,0]]))) | AMLT-learn | /AMLT-learn-0.2.9.tar.gz/AMLT-learn-0.2.9/amltlearn/cluster/Leader.py | Leader.py |
__author__ = 'bejar'
import numpy as np
from sklearn.base import BaseEstimator, ClusterMixin, TransformerMixin
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.cluster import KMeans
from sklearn.neighbors import NearestNeighbors
class GlobalKMeans(BaseEstimator, ClusterMixin, TransformerMixin):
"""Global K-means Algorithm
    Parameters:
n_clusters: int
maximum number of clusters to obtain
    algorithm: string
'classical' the classical algorithm
'bagirov' the Bagirov 2006 variant
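    Example (hypothetical usage sketch, not part of the original source):

    >>> from sklearn.datasets import make_blobs
    >>> X, _ = make_blobs(n_samples=60, centers=3, random_state=0)
    >>> gkm = GlobalKMeans(n_clusters=3).fit(X)
    >>> gkm.cluster_centers_.shape
    (3, 2)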
"""
def __init__(self, n_clusters, algorithm='classical'):
self.n_clusters = n_clusters
self.cluster_centers_ = None
self.labels_ = None
self.cluster_sizes_ = None
self.inertia_ = None
self.algorithm = algorithm
def fit(self,X):
"""
Clusters the examples
:param X:
:return:
"""
if self.algorithm == 'classical':
self.cluster_centers_, self.labels_, self.inertia_ = self._fit_process(X)
elif self.algorithm == 'bagirov':
self.cluster_centers_, self.labels_, self.inertia_ = self._fit_process_bagirov(X)
return self
def predict(self,X):
"""
Returns the nearest cluster for a data matrix
@param X:
@return:
"""
        clasif = []
        for i in range(X.shape[0]):
            # GlobalKMeans has no radius attribute (unlike Leader), so every
            # example is simply assigned to its nearest cluster center
            ncl, _ = self._find_nearest_cluster(X[i].reshape(1, -1), self.cluster_centers_)
            clasif.append(ncl)
        return clasif
def _fit_process(self, X):
"""
Classical global k-means algorithm
:param X:
:return:
"""
# Compute the centroid of the dataset
centroids = sum(X)/X.shape[0]
centroids.shape = (1, X.shape[1])
for i in range(2, self.n_clusters+1):
mininertia = np.infty
for j in range(X.shape[0]):
newcentroids = np.vstack((centroids, X[j]))
km = KMeans(n_clusters=i, init=newcentroids, n_init=1)
km.fit(X)
if mininertia > km.inertia_:
mininertia = km.inertia_
bestkm = km
centroids = bestkm.cluster_centers_
return bestkm.cluster_centers_, bestkm.labels_, bestkm.inertia_
def _fit_process_bagirov(self, X):
"""
Clusters using the global K-means algorithm Bagirov variation
:param X:
:return:
"""
# Create a KNN structure for fast search
self._neighbors = NearestNeighbors()
self._neighbors.fit(X)
# Compute the centroid of the dataset
centroids = sum(X)/X.shape[0]
assignments = [0 for i in range(X.shape[0])]
centroids.shape = (1, X.shape[1])
# compute the distance of the examples to the centroids
mindist = np.zeros(X.shape[0])
for i in range(X.shape[0]):
mindist[i] = euclidean_distances(X[i].reshape(1, -1), centroids[assignments[i]].reshape(1, -1), squared=True)[0]
for k in range(2, self.n_clusters+1):
newCentroid = self._compute_next_centroid(X, centroids, assignments, mindist)
centroids = np.vstack((centroids,newCentroid))
km = KMeans(n_clusters=k, init=centroids, n_init=1)
km.fit(X)
assignments = km.labels_
for i in range(X.shape[0]):
mindist[i] = euclidean_distances(X[i].reshape(1, -1), centroids[assignments[i]].reshape(1, -1), squared=True)[0]
return km.cluster_centers_, km.labels_, km.inertia_
def _compute_next_centroid(self, X, centroids, assignments, mindist):
"""
Computes the candidate for the next centroid
:param X:
:param centroids:
:return:
"""
minsum = np.infty
candCentroid = None
# Compute the first candidate to new centroid
for i in range(X.shape[0]):
distance = euclidean_distances(X[i].reshape(1, -1), centroids[assignments[i]].reshape(1, -1))[0]
S2 = self._neighbors.radius_neighbors(X[i].reshape(1, -1), radius=distance, return_distance=False)[0]
S2centroid = np.sum(X[S2], axis=0)/len(S2)
S2centroid.shape = (1, X.shape[1])
cost = self._compute_fk(X, mindist, S2centroid)
if cost < minsum:
minsum = cost
candCentroid = S2centroid
# Compute examples for the new centroid
S2 = []
newDist = euclidean_distances(X, candCentroid.reshape(1, -1), squared=True)
for i in range(X.shape[0]):
if newDist[i] < mindist[i]:
S2.append(i)
newCentroid = sum(X[S2])/len(S2)
newCentroid.shape = (1, X.shape[1])
while not (candCentroid == newCentroid).all():
candCentroid = newCentroid
S2 = []
newDist = euclidean_distances(X, candCentroid.reshape(1, -1), squared=True)
for i in range(X.shape[0]):
if newDist[i] < mindist[i]:
S2.append(i)
newCentroid = np.sum(X[S2], axis=0)/len(S2)
newCentroid.shape = (1, X.shape[1])
return candCentroid
def _compute_fk(self, X, mindist, ccentroid):
"""
Computes the cost function
:param X:
:param mindist:
:param ccentroid:
:return:
"""
# Distances among the examples and the candidate centroid
centdist = euclidean_distances(X, ccentroid.reshape(1, -1), squared=True)
fk = 0
for i in range(X.shape[0]):
fk = fk + min(mindist[i], centdist[i][0])
return fk
@staticmethod
def _find_nearest_cluster(examp, centers):
"""
Finds the nearest cluster for an example
:param examp:
:param centers:
:return:
"""
dist = euclidean_distances(centers, examp.reshape(1, -1))
pmin = np.argmin(dist)
vmin = np.min(dist)
return pmin, vmin | AMLT-learn | /AMLT-learn-0.2.9.tar.gz/AMLT-learn-0.2.9/amltlearn/cluster/GlobalKMeans.py | GlobalKMeans.py |
__author__ = 'bejar'
import numpy as np
from sklearn.base import TransformerMixin
from sklearn.neighbors import NearestNeighbors
class KnnImputer(TransformerMixin):
"""
Missing values imputation using the mean of the k-neighbors considering the
dimensions that are not missing.
It only uses the examples that do not have any missing value
Parameters:
missing_values: float or 'NaN'
Value that indicates a missing value
n_neighbors: int
The number of neighbors to consider
distance: str
distance to use to compute the neighbors ('euclidean')
"""
neigh = None
miss_val = None
dist = None
miss_ind_ = None
def __init__(self, missing_values='NaN', n_neighbors=1, distance='euclidean'):
self.neigh = n_neighbors
self.miss_val = float(missing_values)
self.dist = distance
    def fit(self, X=None, y=None):
        """
        Does nothing, the imputation is computed in fit_transform
        """
        return self
def _transform(self, X):
"""
Imputes the missings
:param X:
:return:
"""
l_miss_ex = []
l_no_miss_ex = []
self.miss_ind_ = []
for row in range(X.shape[0]):
l_miss_att = []
for column in range(X.shape[1]):
                if np.isnan(X[row, column]) or X[row, column] == self.miss_val:
l_miss_att.append(column)
if l_miss_att:
l_miss_ex.append((row, l_miss_att))
self.miss_ind_.append(row)
else:
l_no_miss_ex.append(row)
if not l_no_miss_ex:
raise Exception('KnnImputer: All examples have missing values')
else:
nomiss = X[l_no_miss_ex]
if nomiss.shape[0] < self.neigh:
raise Exception('KnnImputer: Not enough examples without missings')
for ex, att in l_miss_ex:
l_sel = [s for s in range(X.shape[1]) if s not in att]
knn = NearestNeighbors(n_neighbors=self.neigh, metric=self.dist)
knn.fit(nomiss[:, l_sel])
l_neigh = knn.kneighbors(X[ex][l_sel].reshape(1, -1), return_distance=False)[0]
for a in att:
l_mean = nomiss[l_neigh, a]
X[ex][a] = np.mean(l_mean)
return X
def fit_transform(self, X, copy=True):
"""
Looks for the examples with missing values and computes the new values
:param matrix X: data matrix
:param bool copy: If True returns a copy of the data
:return:
"""
if copy:
y = X.copy()
else:
y = X
self._transform(y)
return y
if __name__ == '__main__':
mean, cov = [0, 0, 0], [(1, .5, .5), (.5, 1, .5), (.5, .5, 1)]
data = np.random.multivariate_normal(mean, cov, 200)
vals = np.random.choice(200, size=20, replace=False)
for v in vals[0:20]:
data[v][0] = np.nan
kimp = KnnImputer(n_neighbors=2)
data2 = kimp.fit_transform(data)
print (kimp.miss_ind_)
for i in range(data.shape[0]):
print (data[i], data2[i]) | AMLT-learn | /AMLT-learn-0.2.9.tar.gz/AMLT-learn-0.2.9/amltlearn/preprocessing/Imputer.py | Imputer.py |
__author__ = 'bejar'
import numpy as np
from sklearn.base import TransformerMixin
#Todo: Add the possibility of using the (weighted) mean value of the interval
class Discretizer(TransformerMixin):
"""
Discretization of the attributes of a dataset (unsupervised)
Parameters:
method: str
* 'equal' equal sized bins
* 'frequency' bins with the same number of examples
bins: int
number of bins
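    Example (hypothetical usage sketch, not part of the original source):

    >>> import numpy as np
    >>> X = np.array([[0.0], [1.0], [2.0], [3.0]])
    >>> Discretizer(method='equal', bins=2).fit_transform(X, copy=True)
    array([[0.],
           [0.],
           [1.],
           [1.]])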
"""
intervals = None
def __init__(self, method='equal', bins=2):
self.method = method
self.bins = bins
def _fit(self, X):
"""
Computes the discretization intervals
:param matrix X:
:return:
"""
if self.method == 'equal':
self._fit_equal(X)
elif self.method == 'frequency':
self._fit_frequency(X)
def _fit_equal(self, X):
"""
Computes the discretization intervals for equal sized discretization
:param X:
:return:
"""
self.intervals = np.zeros((self.bins, X.shape[1]))
for i in range(X.shape[1]):
vmin = np.min(X[:, i])
vmax = np.max(X[:, i])
step = np.abs(vmax - vmin) / float(self.bins)
for j in range(self.bins):
vmin += step
self.intervals[j, i] = vmin
self.intervals[self.bins-1, i] += 0.00000000001
def _fit_frequency(self, X):
"""
Computes the discretization intervals for equal frequency
:param X:
:return:
"""
self.intervals = np.zeros((self.bins, X.shape[1]))
quant = X.shape[0] / float(self.bins)
for i in range(X.shape[1]):
lvals = sorted(X[:, i])
nb = 0
while nb < self.bins:
self.intervals[nb, i] = lvals[int((quant*nb) + quant)-1]
nb += 1
self.intervals[self.bins-1, i] += 0.00000000001
def _transform(self, X, copy=False):
"""
Discretizes the attributes of a dataset
:param matrix X: Data matrix
:return:
"""
if self.intervals is None:
raise Exception('Discretizer: Not fitted')
if copy:
y = X.copy()
else:
y = X
self.__transform(y)
return y
def __discretizer(self, v, at):
"""
        Determines the discretized value for an attribute
:param v:
:return:
"""
        i = 0
        while i < self.intervals.shape[0] and v > self.intervals[i, at]:
            i += 1
return i
def __transform(self, X):
"""
Applies the discretization to all the attributes of the data matrix
:param X:
:return:
"""
for i in range(X.shape[1]):
for j in range(X.shape[0]):
X[j, i] = self.__discretizer(X[j, i], i)
def fit(self, X):
"""
Fits a set of discretization intervals using the data in X
:param matrix X: The data matrix
"""
self._fit(X)
def transform(self, X, copy=False):
"""
Applies previously fitted discretization intervals to X
:param matrix X: The data matrix
:param bool copy: Returns a copy of the transformed datamatrix
:return: The transformed datamatrix
"""
return self._transform(X, copy=copy)
def fit_transform(self, X, copy=False):
"""
Fits and transforms the data
:param matrix X: The data matrix
:param bool copy: Returns a copy of the transformed datamatrix
:return:The transformed datamatrix
"""
self._fit(X)
return self._transform(X, copy=copy) | AMLT-learn | /AMLT-learn-0.2.9.tar.gz/AMLT-learn-0.2.9/amltlearn/preprocessing/Discretizer.py | Discretizer.py |
__author__ = 'bejar'
import numpy as np
import numbers
from sklearn.utils import check_random_state, check_array
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
center_box=(-10.0, 10.0), shuffle=True, random_state=None):
"""Generate isotropic Gaussian blobs for clustering.
7/10/2015
A fixed and more flexible version of the scikit-learn function
Parameters
----------
n_samples : int, or sequence of integers, optional (default=100)
The total number of points equally divided among clusters.
or a sequence of the number of examples of each cluster
n_features : int, optional (default=2)
The number of features for each sample.
centers : int or array of shape [n_centers, n_features], optional
(default=3)
The number of centers to generate, or the fixed center locations.
cluster_std: float or sequence of floats, optional (default=1.0)
The standard deviation of the clusters.
now works for the list of floats
center_box: pair of floats (min, max), optional (default=(-10.0, 10.0))
The bounding box for each cluster center when centers are
generated at random.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for cluster membership of each sample.
Examples
--------
>>> from sklearn.datasets.samples_generator import make_blobs
>>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
"""
generator = check_random_state(random_state)
if isinstance(centers, numbers.Integral):
centers = generator.uniform(center_box[0], center_box[1],
size=(centers, n_features))
else:
centers = check_array(centers)
n_features = centers.shape[1]
X = []
y = []
n_centers = centers.shape[0]
if not isinstance(n_samples, list):
n_samples_per_center = [int(n_samples // n_centers)] * n_centers
for i in range(n_samples % n_centers):
n_samples_per_center[i] += 1
else:
        if len(n_samples) != n_centers:
            raise ValueError('List of number of examples per center does not match number of centers')
n_samples_per_center = n_samples
n_samples = sum(n_samples)
if not isinstance(cluster_std, list):
std_list = [cluster_std] * centers.shape[0]
else:
        if len(cluster_std) != n_centers:
            raise ValueError('List of cluster standard deviations does not match number of centers')
std_list = cluster_std
for i, (n, st) in enumerate(zip(n_samples_per_center, std_list)):
X.append(centers[i] + generator.normal(scale=st,
size=(n, n_features)))
y += [i] * n
X = np.concatenate(X)
y = np.array(y)
if shuffle:
indices = np.arange(n_samples)
generator.shuffle(indices)
X = X[indices]
y = y[indices]
return X, y | AMLT-learn | /AMLT-learn-0.2.9.tar.gz/AMLT-learn-0.2.9/amltlearn/datasets/samples_generator.py | samples_generator.py |
__author__ = 'bejar'
import numpy as np
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.cluster.supervised import contingency_matrix, check_clusterings, mutual_info_score, entropy
def maplabels(labels):
"""
Returns a dictionary mapping a set of labels to an index
:param labels:
:return:
"""
poslabels = {}
    for p, lab in enumerate(labels):
        poslabels[lab] = p
return poslabels
def scatter_matrices_scores(X, labels, indices=['CH']):
"""
Computes different indices obtained from the Within and Between scatter matrices
Includes:
'SSW': Within scatter matrix score
'SSB': Between scatter matrix score
'Hartigan': Hartigan index
    'CH': Calinski-Harabasz index
'Xu': Xu index
'ZCF': ZhaoChuFranti index
:param X:
:param labels:
:return:
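    Example (hypothetical usage sketch, not part of the original source):

    >>> from sklearn.datasets import make_blobs
    >>> X, labels = make_blobs(n_samples=50, centers=2, random_state=0)
    >>> scores = scatter_matrices_scores(X, labels, indices=['CH', 'SSW'])
    >>> sorted(scores.keys())
    ['CH', 'SSW']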
"""
llabels = np.unique(labels)
poslabels = maplabels(llabels)
nclust = len(llabels)
nex = len(labels)
# Centroid of the data
centroid = np.zeros((1, X.shape[1]))
centroid += np.sum(X, axis=0)
centroid /= X.shape[0]
# Compute SSB
ccentroid = np.zeros((nclust, X.shape[1]))
dist = 0.0
for idx in llabels:
center = np.zeros((1, X.shape[1]))
center_mask = labels == idx
center += np.sum(X[center_mask], axis=0)
center /= center_mask.sum()
ccentroid[poslabels[idx]] = center
dvector = euclidean_distances(centroid.reshape(1, -1), ccentroid[poslabels[idx]].reshape(1, -1), squared=True)
dist += dvector.sum() * center_mask.sum()
SSB = dist / len(labels)
# Compute SSW
dist = 0.0
for idx in llabels:
center_mask = labels == idx
dvector = euclidean_distances(X[center_mask], ccentroid[poslabels[idx]].reshape(1, -1), squared=True)
dist += dvector.sum()
SSW = dist / len(labels)
results = {}
if 'CH' in indices:
results['CH'] = (SSB/(nclust-1))/(SSW/(nex-nclust))
if 'Hartigan' in indices:
results['Hartigan'] = -np.log(SSW/SSB)
if 'ZCF' in indices:
results['ZCF'] = (SSW/SSB) * nclust
if 'Xu' in indices:
results['Xu'] = X.shape[1] * np.log(np.sqrt(SSW/(X.shape[1]*nex*nex)))+np.log(nclust)
if 'SSW' in indices:
results['SSW'] = SSW
if 'SSB' in indices:
results['SSB'] = SSB
if 'Inertia' in indices:
results['Inertia'] = SSW * len(labels)
return results
def within_scatter_matrix_score(X, labels):
"""
Computes the within scatter matrix score of a labeling of a clustering
:param X:
:param labels:
:return:
"""
llabels = np.unique(labels)
dist = 0.0
for idx in llabels:
center = np.zeros((1, X.shape[1]))
center_mask = labels == idx
center += np.sum(X[center_mask], axis=0)
center /= center_mask.sum()
dvector = euclidean_distances(X[center_mask], center.reshape(1, -1), squared=True)
dist += dvector.sum()
return dist / len(labels)
def between_scatter_matrix_score(X, labels):
"""
Computes the between scatter matrix score of a labeling of a clustering
:param X:
:param labels:
:return:
"""
llabels = np.unique(labels)
# Centroid of the data
centroid = np.zeros((1, X.shape[1]))
centroid += np.sum(X, axis=0)
centroid /= X.shape[0]
dist = 0.0
for idx in llabels:
center = np.zeros((1, X.shape[1]))
center_mask = labels == idx
center += np.sum(X[center_mask], axis=0)
center /= center_mask.sum()
dvector = euclidean_distances(centroid.reshape(1, -1), center.reshape(1, -1), squared=True)
dist += dvector.sum() * center_mask.sum()
return dist / len(labels)
def calinski_harabasz_score(X, labels):
"""
Computes the Calinski&Harabasz score for a labeling of the data
:param X:
:param labels:
:return:
"""
llabels = np.unique(labels)
poslabels = maplabels(llabels)
# Centroid of the data
centroid = np.zeros((1, X.shape[1]))
centroid += np.sum(X, axis=0)
centroid /= X.shape[0]
# Compute SSB
ccentroid = np.zeros((len(llabels), X.shape[1]))
dist = 0.0
for idx in llabels:
center = np.zeros((1, X.shape[1]))
center_mask = labels == idx
center += np.sum(X[center_mask], axis=0)
center /= center_mask.sum()
ccentroid[poslabels[idx]] = center
dvector = euclidean_distances(centroid.reshape(1, -1), ccentroid[poslabels[idx]].reshape(1, -1), squared=True)
dist += dvector.sum() * center_mask.sum()
SSB = dist / len(labels)
# Compute SSW
dist = 0.0
for idx in llabels:
center_mask = labels == idx
dvector = euclidean_distances(X[center_mask], ccentroid[poslabels[idx]].reshape(1, -1), squared=True)
dist += dvector.sum()
SSW = dist / len(labels)
return (SSB/(len(llabels)-1))/(SSW/(len(labels)-len(llabels)))
def zhao_chu_franti_score(X, labels):
"""
Implements the method defined in:
Zhao, Q.; Xu, M. & Franti, P. Sum-of-Squares Based Cluster Validity Index and Significance Analysis
Adaptive and Natural Computing Algorithms, Springer Berlin Heidelberg, 2009, 5495, 313-322
:param X:
:param labels:
:return:
"""
llabels = np.unique(labels)
poslabels = maplabels(llabels)
# Centroid of the data
centroid = np.zeros((1, X.shape[1]))
centroid += np.sum(X, axis=0)
centroid /= X.shape[0]
# Compute SSB
ccentroid = np.zeros((len(llabels), X.shape[1]))
dist = 0.0
for idx in llabels:
center = np.zeros((1, X.shape[1]))
center_mask = labels == idx
center += np.sum(X[center_mask], axis=0)
center /= center_mask.sum()
ccentroid[poslabels[idx]] = center
dvector = euclidean_distances(centroid.reshape(1, -1), ccentroid[poslabels[idx]].reshape(1, -1), squared=True)
dist += dvector.sum() * center_mask.sum()
SSB = dist / len(labels)
# Compute SSW
dist = 0.0
for idx in llabels:
center_mask = labels == idx
dvector = euclidean_distances(X[center_mask], ccentroid[poslabels[idx]].reshape(1, -1), squared=True)
dist += dvector.sum()
SSW = dist / len(labels)
return (SSW/SSB) * len(llabels)
def davies_bouldin_score(X, labels):
"""
Implements the Davies&Bouldin score for a labeling of the data
:param X:
:param labels:
:return:
"""
llabels = np.unique(labels)
poslabels = maplabels(llabels)
nclust = len(llabels)
# compute the centroids
centroids = np.zeros((nclust, X.shape[1]))
for idx in llabels:
center = np.zeros((1, X.shape[1]))
center_mask = labels == idx
center += np.sum(X[center_mask], axis=0)
center /= center_mask.sum()
centroids[poslabels[idx]] = center
# Centroids distance matrix
cdistances = euclidean_distances(centroids)
# Examples to centroid mean distance
mdcentroid = np.zeros(nclust)
for idx in llabels:
center_mask = labels == idx
vdist = euclidean_distances(centroids[poslabels[idx]].reshape(1, -1), X[center_mask])
mdcentroid[poslabels[idx]] = vdist.sum()/center_mask.sum()
# Compute the index
dist = 0.0
for idxi in llabels:
lvals = []
disti = mdcentroid[poslabels[idxi]]
for idxj in llabels:
if idxj != idxi:
lvals.append((disti + mdcentroid[poslabels[idxj]])/cdistances[poslabels[idxi], poslabels[idxj]])
dist += max(lvals)
return dist/nclust
def jeffrey_divergence_score(X, labels):
"""
Implements the score based on the Jeffrey divergence that appears in:
Said, A.; Hadjidj, R. & Foufou, S. "Cluster validity index based on Jeffrey divergence"
Pattern Analysis and Applications, Springer London, 2015, 1-11
:param X:
:param labels:
:return:
"""
llabels = np.unique(labels)
poslabels = maplabels(llabels)
nclust = len(llabels)
# compute the centroids
centroids = np.zeros((nclust, X.shape[1]))
for idx in llabels:
center = np.zeros((1, X.shape[1]))
center_mask = labels == idx
center += np.sum(X[center_mask], axis=0)
center /= center_mask.sum()
centroids[poslabels[idx]] = center
lcovs = []
linvcovs = []
for idx in llabels:
cov_mask = labels == idx
covar = np.cov(X[cov_mask].T)
lcovs.append(covar)
linvcovs.append(np.linalg.inv(covar))
traces = np.zeros((nclust, nclust))
for idx1 in llabels:
for idx2 in llabels:
traces[poslabels[idx1], poslabels[idx2]] = np.trace(np.dot(linvcovs[poslabels[idx1]], lcovs[poslabels[idx2]]))
traces[poslabels[idx1], poslabels[idx2]] += np.trace(np.dot(linvcovs[poslabels[idx2]], lcovs[poslabels[idx1]]))
traces[poslabels[idx1], poslabels[idx2]] /= 2.0
sumcov = np.zeros((nclust, nclust))
for idx1 in llabels:
for idx2 in llabels:
v1 = centroids[poslabels[idx1]]
v2 = centroids[poslabels[idx2]]
vm = v1-v2
mcv = linvcovs[poslabels[idx1]] + linvcovs[poslabels[idx2]]
sumcov[poslabels[idx1], poslabels[idx2]] = np.dot(vm.T, np.dot(mcv, vm))
sumcov[poslabels[idx1], poslabels[idx2]] /= 2.0
ssep = 0.0
for idx1 in llabels:
minv = np.inf
for idx2 in llabels:
if idx1 != idx2:
val = traces[poslabels[idx1], poslabels[idx2]] + sumcov[poslabels[idx1], poslabels[idx2]] - centroids.shape[1]
if minv > val:
minv = val
ssep += minv
scompact = 0.0
for idx in llabels:
center_mask = labels == idx
        dvector = euclidean_distances(X[center_mask], centroids[poslabels[idx]].reshape(1, -1), squared=True)
scompact += dvector.max()
return scompact/ssep
def variation_of_information_score(labels_true, labels_pred):
"""Variation of Information (Meila, 2003)
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
mutual = mutual_info_score(labels_true, labels_pred)
e1 = entropy(labels_true)
e2 = entropy(labels_pred)
return e1 + e2 - (2* mutual)
def jaccard_score(labels_true, labels_pred):
"""
Jaccard coeficient computed according to:
Ceccarelli, M. & Maratea, A. A "Fuzzy Extension of Some Classical Concordance Measures and an Efficient Algorithm
for Their Computation" Knowledge-Based Intelligent Information and Engineering Systems,
Springer Berlin Heidelberg, 2008, 5179, 755-763
:param labels_true:
:param labels_pred:
:return:
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
contingency = contingency_matrix(labels_true, labels_pred)
cc = np.sum(contingency * contingency)
N11 = (cc - n_samples)
c1 = contingency.sum(axis=1)
N01 = np.sum(c1 * c1) - cc
c2 = contingency.sum(axis=0)
N10 = np.sum(c2 * c2) - cc
return (N11*1.0)/(N11+N01+N10)
def folkes_mallow_score(labels_true, labels_pred):
"""
Folkes&Mallow score computed according to:
Ceccarelli, M. & Maratea, A. A "Fuzzy Extension of Some Classical Concordance Measures and an Efficient Algorithm
for Their Computation" Knowledge-Based Intelligent Information and Engineering Systems,
Springer Berlin Heidelberg, 2008, 5179, 755-763
:param labels_true:
:param labels_pred:
:return:
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
contingency = contingency_matrix(labels_true, labels_pred)
cc = np.sum(contingency * contingency)
N11 = (cc - n_samples)
c1 = contingency.sum(axis=1)
N01 = np.sum(c1 * c1) - cc
c2 = contingency.sum(axis=0)
N10 = np.sum(c2 * c2) - cc
return (N11*1.0)/np.sqrt((N11+N01)*(N11+N10))
def bhargavi_gowda_score(X, labels):
"""
Score from:
Bhargavi, M. & Gowda, S. D. "A novel validity index with dynamic cut-off for determining true clusters"
Pattern Recognition , 2015, 48, 3673 - 3687
:param X:
:param labels:
:return:
"""
llabels = np.unique(labels)
poslabels = maplabels(llabels)
nclust = len(llabels)
nex = len(labels)
# Centroid of the data
centroid = np.zeros((1, X.shape[1]))
centroid += np.sum(X, axis=0)
centroid /= X.shape[0]
# Compute SSB and intracluster distance
ccentroid = np.zeros((nclust, X.shape[1]))
dist = 0.0
for idx in llabels:
center = np.zeros((1, X.shape[1]))
center_mask = labels == idx
center += np.sum(X[center_mask], axis=0)
center /= center_mask.sum()
ccentroid[poslabels[idx]] = center
dvector = euclidean_distances(centroid.reshape(1, -1), ccentroid[poslabels[idx]].reshape(1, -1), squared=True)
dist += dvector.sum() * center_mask.sum()
SSB = dist / len(labels)
# Compute SSW
dist = 0.0
Intra = 0.0
for idx in llabels:
center_mask = labels == idx
dvector = euclidean_distances(X[center_mask], ccentroid[poslabels[idx]].reshape(1, -1), squared=True)
dist += dvector.sum()
sdvector = euclidean_distances(X[center_mask], ccentroid[poslabels[idx]].reshape(1, -1), squared=False)
Intra += sdvector.sum()
SSW = dist / len(labels)
SST = SSB + SSW
# Centroids distance matrix
cdistances = euclidean_distances(ccentroid, squared=False)
Inter = np.sum(cdistances)/(nclust**2)
return(np.abs((SSW/SSB)*SST) - (Intra/Inter) - (nex - nclust)) | AMLT-learn | /AMLT-learn-0.2.9.tar.gz/AMLT-learn-0.2.9/amltlearn/metrics/cluster.py | cluster.py |
__author__ = 'bejar'
import numpy as np
def simetrized_kullback_leibler_divergence(m1, m2):
    """
    Symmetrized Kullback-Leibler divergence between two probability matrices/vectors
    :param m1:
    :param m2:
    :return:
    """
    # D(m1||m2) + D(m2||m1), with D(p||q) = sum(p * log(p / q))
    dkl12 = m1 * np.log(m1 / m2)
    dkl21 = m2 * np.log(m2 / m1)
    return dkl12.sum() + dkl21.sum()
def kullback_leibler_divergence(m1, m2):
    """
    Kullback-Leibler divergence between two probability matrices/vectors
    :param m1:
    :param m2:
    :return:
    """
    # D(m1||m2) = sum(m1 * log(m1 / m2))
    dkl12 = m1 * np.log(m1 / m2)
    return dkl12.sum()
def jensen_shannon_divergence(m1, m2):
"""
Jensen Shannon Divergence between two probability matrices/vectors
:param m1:
:param m2:
:return:
"""
m = 0.5*(m1+m2)
return (0.5 * kullback_leibler_divergence(m1, m)) + (0.5 * kullback_leibler_divergence(m2, m))
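# Hypothetical sanity check (not part of the original source): for two
# normalized distributions the Jensen-Shannon divergence is symmetric and
# bounded by log(2), e.g. with p = np.array([0.6, 0.4]) and
# q = np.array([0.3, 0.7]), jensen_shannon_divergence(p, q) equals
# jensen_shannon_divergence(q, p).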
def renyi_half_divergence(m1, m2):
"""
Renyi divergence for parameter 1/2 between two probability matrices/vectors
:param m1:
:param m2:
:return:
"""
pm = m1 * m2
spm = np.sqrt(pm)
return -2 * np.log(spm.sum())
def square_frobenius_distance(m1, m2):
"""
Square frobenius distance between two probability matrices/vectors
:param m1:
:param m2:
:return:
"""
c = m1 - m2
c = c * c
return c.sum()
def bhattacharyya_distance(m1, m2):
    """
    Bhattacharyya distance between two probability matrices/vectors
    :param m1:
    :param m2:
    :return:
    """
    total = 0.0
    for a, b in zip(m1, m2):
        total += np.sqrt(a * b)
    return -np.log(total)
def hellinger_distance(m1, m2):
    """
    Hellinger distance between two probability matrices/vectors
    :param m1:
    :param m2:
    :return:
    """
    total = 0.0
    for a, b in zip(m1, m2):
        total += (np.sqrt(a) - np.sqrt(b)) ** 2
    # the 1/sqrt(2) normalization keeps the distance in [0, 1]
    return (1 / np.sqrt(2)) * np.sqrt(total) | AMLT-learn | /AMLT-learn-0.2.9.tar.gz/AMLT-learn-0.2.9/amltlearn/metrics/divergences.py | divergences.py
# AMN

[](https://codecov.io/gh/Deric-W/AMN)
The AMN package implements a simple virtual machine for the AM0 and AM1 instructions sets.
To use it, simply execute it with `python3 -m AMN -i <instruction set> exec path/to/file.txt` to execute the instructions written in a file.
If you want an interactive console just use `python3 -m AMN -i <instruction set> repl`.
## Requirements
Python >= 3.10 is required to use the utility.
## Installation
```sh
python3 -m pip install AMN
```
## Examples
The REPL (read eval print loop) in action:
```
python3 -m AMN -i am0 repl
Welcome the the AM0 REPL, type 'help' for help
AM0 >> exec READ 0
Input: 8
AM0 >> exec READ 1
Input: 42
AM0 >> exec LOAD 0
AM0 >> exec LOAD 1
AM0 >> exec GT
AM0 >> exec JMC 24
AM0 >> status
Counter: 24
Stack: []
Memory:
0 := 8
1 := 42
AM0 >> exit
Exiting REPL...
```
Example program which outputs the biggest of two numbers:
```
READ 0;
READ 1;
LOAD 0;
LOAD 1;
GT;
JMC 10;
LOAD 0;
STORE 2;
JMP 12;
LOAD 1;
STORE 2;
WRITE 2;
```
| AMN | /AMN-0.6.0.tar.gz/AMN-0.6.0/README.md | README.md |
# AMON
[](https://pypi.python.org/pypi/AMON) [](https://travis-ci.org/lozuponelab/AMON) [](https://www.codacy.com/app/lozuponelab/AMON/dashboard) [](https://coveralls.io/github/lozuponelab/AMON)
A command line tool for predicting the compounds produced by microbes and the host.
## Installation
It is recommended to install AMON in a conda environment. The environment can be created by first downloading the environment file.
```bash
wget https://raw.githubusercontent.com/shafferm/AMON/master/environment.yaml
```
Then create a new conda environment. Using the environment file and activate it.
```bash
conda env create -f environment.yaml -n AMON
conda activate AMON
```
Then it can be installed via pip.
```bash
pip install AMON-bio
```
### Alternative installation
Alternatively, AMON can be installed directly with pip, without creating a conda environment first.
```bash
pip install AMON-bio
```
## Running AMON
AMON includes two scripts. `extract_ko_genome_from_organism.py` takes a KEGG organism flat file and makes a list of the KOs present in that file. `AMON.py` predicts the metabolites that could be produced by the KOs used as input. This can be compared to the KOs present in the host, or from some other gene set, as well as to a set of KEGG metabolites.
### `extract_ko_genome_from_organism.py`
A simple script. It takes a downloaded KEGG organism flat file, or a KEGG organism ID, and outputs a newline-separated list of the KOs present in that organism.
```
extract_ko_genome_from_organism.py --help
usage: extract_ko_genome_from_organism.py [-h] -i INPUT -o OUTPUT
[--from_flat_file]
optional arguments:
-h, --help show this help message and exit
-i INPUT, --input INPUT
KEGG organism identifier or KEGG organism flat file
(default: None)
-o OUTPUT, --output OUTPUT
Output file of new line separated list of KOs from
genome (default: None)
--from_flat_file Indicates that input is a flat flile to be parsered
directly (default: False)
```
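For example, a hypothetical invocation (the output file name is a placeholder) that pulls the KO genome for a KEGG organism identifier, here `eco` for *E. coli* K-12 MG1655, would be:
```bash
extract_ko_genome_from_organism.py -i eco -o eco_kos.txt
```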
### `AMON.py`
The full script performs an analysis of the possible metabolites originating from the list of KOs. From this, together with optional lists of compounds detected via metabolomics and of KOs present in a host or other environment, a table of the possible origin of compounds is generated. A pathway enrichment is also done on the list of compounds that could possibly be generated, using the hypergeometric test. If either of the optional lists is included, a Venn diagram is generated showing which compounds can be produced by, or were measured in, each list. If both the bacterial and host KOs are given, a heatmap of pathway enrichments is generated as well, and the enrichment test uses only compounds predicted to be uniquely generated by the bacteria or the host.
#### Inputs
The `gene_set` parameter is a list of KO ids that can take the form of a plain text file containing a whitespace-separated list of KO ids, a tsv or csv where the column labels are KO ids, or a biom formatted file where the observation ids are KO ids. These are the KOs used to determine the compounds that could be generated by the bacterial community. This parameter and the output directory, where all results will be written, are the only required inputs. There are two other optional inputs: `detected_compounds` and `other_gene_set`. `detected_compounds` is a set of compounds that were detected in metabolomics of the sample and can come in any of the forms available for the input. `other_gene_set` is a set of KO ids encoded by the host, or another set of genes that can be expressed as KO ids. It can also take any of the forms available to the input parameter.
Two flags are available that affect the Venn diagram and the enrichment analysis. `detected_only` will only include compounds that were detected as the background set of compounds for the hypergeometric test; this flag requires the `detected_compounds` parameter to be used. The `rn_compound_only` flag makes it so that only detected compounds which have a reaction associated with them in KEGG will be used for both the Venn diagram and the hypergeometric test.
Finally, a set of locations for KEGG FTP downloaded files is available. These inputs are optional; if they are not provided, the KEGG API will be used to retrieve the necessary records. It is much faster to run with the KEGG FTP downloaded files if you have access to them.
**NOTE: the KEGG API has limits. It is currently past the limits of the KEGG API to require all inputs to be pulled from the KEGG API with a reasonably sized data set. This is something I am working on and if you have any suggestions for how to work within these limits please create an issue or pull request with a fix.**
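As a hypothetical example (the file names here are placeholders), an analysis comparing a bacterial KO list against host KOs and measured metabolites could be run as:
```bash
amon.py -i bacterial_kos.txt -o amon_output --other_gene_set host_kos.txt --detected_compounds detected_compounds.txt
```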
#### Outputs
All outputs are written to the `output` directory. If only the `input` parameter is given then three files will be generated called origin_table.tsv, kegg_mapper.tsv and bacteria_enrichment.tsv. The origin_table.tsv has rows as the compounds that could be generated and the first column is true or false indicating if the bacterial KOs provided could generate this compound. If the `other_gene_set` input is provided an additional column will be generated in this table with true/false values indicating if this set of KOs could generate these compounds. If the `detected_compounds` parameter is given then an additional column with true/false values indicating whether or not this compound was detected is added.
To visualize the compounds predicted to be produced by the microbiome, and optionally by the host, alongside measured compounds, the kegg_mapper.tsv file can be used. This file can be used as input [here](https://www.genome.jp/kegg/tool/map_pathway2.html). Blue compounds are predicted to be generated only by the microbiome, yellow compounds only by the host, and green compounds by both. Compounds that were detected have an orange outline, with a light orange fill if that compound was not predicted to be produced by microbiome or host.
The bacteria_enrichment.tsv file, and the host_enrichment.tsv file if the `other_gene_set` parameter is given, give the results of the pathway enrichment analysis from the compounds able to be produced by the KOs provided. When the `other_gene_set` parameter is given, a heatmap is made to compare the significant pathways found for the bacteria and host KO lists.
When the `other_gene_set` and/or `detected_compounds` parameters are given, a Venn diagram will be made to show the overlap in compounds possibly generated or detected.
#### Full help
```
amon.py --help
usage: amon.py [-h] -i GENE_SET -o OUTPUT_DIR
[--detected_compounds DETECTED_COMPOUNDS]
[--other_gene_set OTHER_GENE_SET] [--detected_only]
[--rn_compound_only] [--ko_file_loc KO_FILE_LOC]
[--rn_file_loc RN_FILE_LOC] [--co_file_loc CO_FILE_LOC]
[--pathway_file_loc PATHWAY_FILE_LOC] [--save_entries]
[--verbose]
optional arguments:
-h, --help show this help message and exit
-i GENE_SET, --gene_set GENE_SET
KEGG KO's from bacterial community or organism of
interest in the form of a white space separated list,
a tsv or csv with KO ids as column names or a biom
file with KO ids as observations (default: None)
-o OUTPUT_DIR, --output_dir OUTPUT_DIR
directory to store output (default: None)
--detected_compounds DETECTED_COMPOUNDS
list of compounds detected via metabolomics (default:
None)
--other_gene_set OTHER_GENE_SET
white space separated list of KEGG KO's from the host,
another organism or other environment (default: None)
--detected_only only use detected compounds in enrichment analysis
(default: False)
--rn_compound_only only use compounds with associated reactions (default:
False)
--ko_file_loc KO_FILE_LOC
Location of ko file from KEGG FTP download (default:
None)
--rn_file_loc RN_FILE_LOC
Location of reaction file from KEGG FTP download
(default: None)
--co_file_loc CO_FILE_LOC
Location of compound file from KEGG FTP download
(default: None)
--pathway_file_loc PATHWAY_FILE_LOC
Location of pathway file from KEGG FTP download
(default: None)
--save_entries Save json file of KEGG entries at all levels used in
analysis for deeper analysis (default: False)
--verbose verbose output (default: False)
```
| AMON-bio | /AMON-bio-1.0.0.tar.gz/AMON-bio-1.0.0/README.md | README.md |
import argparse
from AMON.predict_metabolites import main
if __name__ == '__main__':
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# Primary inputs
parser.add_argument('-i', '--gene_set', help="KEGG KO's from bacterial community or organism of interest in the "
"form of a white space separated list, a tsv or csv with KO ids as "
"column names or a biom file with KO ids as observations",
required=True)
parser.add_argument('-o', '--output_dir', help="directory to store output", required=True)
parser.add_argument('--detected_compounds', help="list of compounds detected via metabolomics")
parser.add_argument('--other_gene_set', help="white space separated list of KEGG KO's from the host, another "
"organism or other environment")
# Names for gene sets
parser.add_argument('--gene_set_name', help="Name to use for first gene set (should have no spaces, underscore "
"separated)")
parser.add_argument('--other_gene_set_name', help="Name to use for second gene set (should have no spaces, "
"underscore separated)")
# Options
parser.add_argument('--keep_separated', help='If input in biom or tabular format keep samples separate for '
'analysis', action='store_true', default=False)
parser.add_argument('--samples_are_columns', help='If data is in tabular format, by default genes are columns and '
'samples rows, to indicate that samples are columns and genes '
'are rows use this flag', action='store_true', default=False)
# Filters
parser.add_argument('--detected_only', help="only use detected compounds in enrichment analysis",
action='store_true', default=False)
parser.add_argument('--rn_compound_only', help="only use compounds with associated reactions", action='store_true',
default=False)
parser.add_argument('--unique_only', help='only use compounds that are unique to a sample in enrichment',
action='store_true', default=False)
# Local KEGG files
parser.add_argument('--ko_file_loc', help='Location of ko file from KEGG FTP download')
parser.add_argument('--rn_file_loc', help='Location of reaction file from KEGG FTP download')
parser.add_argument('--co_file_loc', help='Location of compound file from KEGG FTP download')
parser.add_argument('--pathway_file_loc', help='Location of pathway file from KEGG FTP download')
parser.add_argument('--save_entries', help='Save json file of KEGG entries at all levels used in analysis for '
'deeper analysis', action='store_true', default=False)
args = parser.parse_args()
kos_loc = args.gene_set
output_dir = args.output_dir
detected_compounds = args.detected_compounds
other_kos_loc = args.other_gene_set
if args.gene_set_name is None:
name1 = "gene_set_1"
else:
name1 = args.gene_set_name
if args.other_gene_set_name is None:
name2 = "gene_set_2"
else:
name2 = args.other_gene_set_name
keep_separated = args.keep_separated
samples_are_columns = args.samples_are_columns
detected_compounds_only = args.detected_only
rn_compounds_only = args.rn_compound_only
unique_only = args.unique_only
ko_file_loc = args.ko_file_loc
rn_file_loc = args.rn_file_loc
co_file_loc = args.co_file_loc
pathway_file_loc = args.pathway_file_loc
write_json = args.save_entries
if detected_compounds_only and detected_compounds is None:
raise ValueError('Cannot have detected compounds only and not provide detected compounds')
main(kos_loc, output_dir, other_kos_loc, detected_compounds, name1, name2, keep_separated, samples_are_columns,
detected_compounds_only, rn_compounds_only, unique_only, ko_file_loc=ko_file_loc, rn_file_loc=rn_file_loc,
co_file_loc=co_file_loc, pathway_file_loc=pathway_file_loc, write_json=write_json) | AMON-bio | /AMON-bio-1.0.0.tar.gz/AMON-bio-1.0.0/scripts/amon.py | amon.py |
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib_venn import venn2, venn2_circles, venn3, venn3_circles
from scipy.stats import hypergeom
from os import path, makedirs
from statsmodels.sandbox.stats.multicomp import multipletests
import numpy as np
from biom import load_table
import seaborn as sns
import json
from collections import defaultdict, OrderedDict
from datetime import datetime
from KEGG_parser.parsers import parse_ko, parse_rn, parse_co, parse_pathway
from KEGG_parser.downloader import get_kegg_record_dict
sns.set()
# TODO: take multiple files
class Logger(OrderedDict):
""""""
def __init__(self, output):
super(Logger, self).__init__()
self.output_file = output
self['start time'] = datetime.now()
def output_log(self):
with open(self.output_file, 'w') as f:
self['finish time'] = datetime.now()
self['elapsed time'] = self['finish time'] - self['start time']
for key, value in self.items():
f.write(key + ': ' + str(value) + '\n')
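# Hypothetical usage of Logger (not part of the original source):
#   logger = Logger('amon_log.txt')
#   logger['gene set'] = 'bacteria_kos.txt'
#   logger.output_log()  # writes all key: value pairs plus start/finish/elapsed time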
def p_adjust(pvalues, method='fdr_bh'):
res = multipletests(pvalues, method=method)
return np.array(res[1], dtype=float)
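# Hypothetical example (not part of the original source): Benjamini-Hochberg
# adjustment of three p-values,
#   p_adjust([0.01, 0.02, 0.5])  ->  array([0.03, 0.03, 0.5])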
def read_in_ids(file_loc, keep_separated=False, samples_are_columns=False, name=None):
"""
Read in kos from whitespace separated list (.txt), tsv with KOs as row headers (.tsv/.csv) or biom table (.biom).
"""
if file_loc.endswith('.txt'):
if name is None:
raise ValueError('Name must be given if giving .txt list')
return {name: set([i.strip() for i in open(file_loc).read().split()])}
elif (file_loc.endswith('.tsv') or file_loc.endswith('.csv')) and keep_separated:
genome_table = pd.read_table(file_loc, sep=None, index_col=0, engine='python')
samples_dict = dict()
if samples_are_columns:
genome_table = genome_table.transpose()
for sample in genome_table.index:
samples_dict[sample] = set(genome_table.columns[genome_table.loc[sample].astype(bool)])
return samples_dict
elif file_loc.endswith('.tsv') or file_loc.endswith('.csv'):
if name is None:
raise ValueError('Name must be given if giving .tsv or .csv and not separating')
return {name: set(pd.read_table(file_loc, sep=None, index_col=0, engine='python').columns)}
elif file_loc.endswith('.biom') and keep_separated:
id_table = load_table(file_loc)
samples_dict = dict()
for data, sample, _ in id_table.iter(axis='sample'):
samples_dict[sample] = set(id_table.ids(axis='observation')[data.astype(bool)])
return samples_dict
elif file_loc.endswith('.biom'):
if name is None:
raise ValueError('Name must be given if giving .biom and not separating')
id_table = load_table(file_loc)
# first remove KO's which aren't present in any samples
ids_to_keep = id_table.ids(axis='observation')[id_table.sum(axis='observation') > 0]
id_table.filter(ids_to_keep, axis='observation', inplace=True)
return {name: set(id_table.ids(axis='observation'))}
else:
        raise ValueError('Input file %s does not have a parsable file ending.' % file_loc)
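# Hypothetical usage (not part of the original source): a plain text KO list
# requires an explicit sample name, e.g.
#   read_in_ids('bacteria_kos.txt', name='bacteria')
# returns a dict of the form {'bacteria': {'K00001', ...}}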
def get_rns_from_kos(dict_of_kos: dict, ko_dict: dict):
sample_rns = dict()
for sample, list_of_kos in dict_of_kos.items():
reaction_set = list()
for ko in list_of_kos:
try:
ko_record = ko_dict[ko]
if 'RN' in ko_record['DBLINKS']:
reaction_set += ko_record['DBLINKS']['RN']
except KeyError:
pass
sample_rns[sample] = reaction_set
return sample_rns
def get_products_from_rns(dict_of_rns: dict, rn_dict: dict):
return {sample: set([co for rn in list_of_rns for co in rn_dict[rn]['EQUATION'][1]])
for sample, list_of_rns in dict_of_rns.items()}
def reverse_dict_of_lists(dict_of_lists):
reversed_dict = defaultdict(list)
for key, list_ in dict_of_lists.items():
for item in list_:
reversed_dict[item].append(key)
return reversed_dict
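# Example: reverse_dict_of_lists({'s1': ['C1', 'C2']}) -> {'C1': ['s1'], 'C2': ['s1']}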
def make_compound_origin_table(sample_cos_produced: dict, cos_measured=None):
columns = list(sample_cos_produced.keys())
rows = list()
cos_to_samples_dict = reverse_dict_of_lists(sample_cos_produced)
for co, samples in cos_to_samples_dict.items():
rows.append([sample in samples for sample in columns])
table = pd.DataFrame(rows, index=cos_to_samples_dict.keys(), columns=columns)
if cos_measured is not None:
table['detected'] = [co in cos_measured for co in table.index]
return table
def merge_dicts_of_lists(*dicts):
merged_dicts = defaultdict(list)
for dict_ in dicts:
for key, list_ in dict_.items():
merged_dicts[key] += list_
return merged_dicts
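# Example: merge_dicts_of_lists({'a': [1]}, {'a': [2], 'b': [3]}) -> {'a': [1, 2], 'b': [3]}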
def make_kegg_mapper_input(sample_ids, detected_ids=None, origin_colors=('blue', 'green', 'yellow'),
detected_color='orange'):
samples = list(sample_ids.keys())
microbe_ids = sample_ids[samples[0]]
if len(samples) == 2:
host_ids = sample_ids[samples[1]]
else:
host_ids = ()
if detected_ids is None:
detected_ids = ()
ids = list()
colors = list()
for id_ in set(microbe_ids) | set(host_ids) | set(detected_ids):
# save id
ids.append(id_)
# check where id is present
microbe_present = id_ in microbe_ids
host_present = id_ in host_ids
detected_present = id_ in detected_ids
origin_color = None
detect_color = None
if microbe_present and host_present:
origin_color = origin_colors[1]
elif microbe_present:
origin_color = origin_colors[0]
elif host_present:
origin_color = origin_colors[2]
else:
pass
if detected_present:
detect_color = detected_color
color = ''
if origin_color is not None:
color += origin_color
if detect_color is not None:
color += ',%s' % detect_color
colors.append(color)
df = pd.Series(colors, index=ids)
return df
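# The returned Series maps each KEGG id to a KEGG Mapper colour string: by
# default 'blue' (first set only), 'green' (both sets) or 'yellow' (second
# set only), with ',orange' appended when the id was also detected.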
def make_venn(sample_cos_produced, measured_cos=None, output_loc=None, name1='gene_set_1', name2='gene_set_2'):
samples = list(sample_cos_produced.keys())
bac_cos = sample_cos_produced[samples[0]]
if len(samples) == 2:
host_cos = sample_cos_produced[samples[1]]
else:
host_cos = None
if host_cos is None and measured_cos is None:
raise ValueError("Must give host_cos or measured_cos to make venn diagram")
if host_cos is not None and measured_cos is None:
_ = venn2((set(bac_cos), set(host_cos)),
("Compounds predicted\nproduced by %s" % name1.replace('_', ' '),
"Compounds predicted\nproduced by %s" % name2.replace('_', ' ')),
set_colors=('white',)*2)
_ = venn2_circles((set(bac_cos), set(host_cos)), linestyle='solid')
elif host_cos is None and measured_cos is not None:
_ = venn2((set(bac_cos), set(measured_cos)),
("Compounds predicted\nproduced by %s" % name1.replace('_', ' '), "Compounds measured"),
set_colors=('white',)*2)
_ = venn2_circles((set(bac_cos), set(measured_cos)), linestyle='solid')
else:
_ = venn3((set(measured_cos), set(bac_cos), set(host_cos)),
("Compounds measured", "Compounds predicted\nproduced by %s" % name1.replace('_', ' '),
"Compounds predicted\nproduced by %s" % name2.replace('_', ' ')),
set_colors=('white',)*3)
_ = venn3_circles((set(measured_cos), set(bac_cos), set(host_cos)), linestyle='solid')
if output_loc is not None:
plt.savefig(output_loc, bbox_inches='tight', dpi=300)
else:
plt.show()
def get_pathways_from_cos(co_dict):
pathway_list = list()
for co_record in co_dict.values():
if 'PATHWAY' in co_record:
pathway_list += [pathway[0] for pathway in co_record['PATHWAY']]
return set(pathway_list)
def get_unique_from_dict_of_lists(dict_of_lists):
unique_dict_of_lists = dict()
for key, list_ in dict_of_lists.items():
all_other_values = set([value for other_key, other_list in dict_of_lists.items() for value in other_list
if other_key != key])
unique_dict_of_lists[key] = set(list_) - all_other_values
return unique_dict_of_lists
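# Example: get_unique_from_dict_of_lists({'a': [1, 2], 'b': [2, 3]}) -> {'a': {1}, 'b': {3}}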
def get_pathway_to_co_dict(pathway_dict, no_drug=True, no_glycan=True):
pathway_to_co_dict = {pathway_record['NAME']: [compound[0] for compound in pathway_record['COMPOUND']]
for pathway_record in pathway_dict.values() if 'COMPOUND' in pathway_record}
if no_drug:
pathway_to_co_dict = {pathway: [co for co in cos if not co.startswith('D')]
for pathway, cos in pathway_to_co_dict.items()}
if no_glycan:
pathway_to_co_dict = {pathway: [co for co in cos if not co.startswith('G')]
for pathway, cos in pathway_to_co_dict.items()}
return pathway_to_co_dict
def calculate_enrichment(cos, co_pathway_dict, min_pathway_size=10):
all_cos = set([co for co_list in co_pathway_dict.values() for co in co_list])
pathway_names = list()
pathway_data = list()
for pathway, pathway_cos in co_pathway_dict.items():
pathway_present = set(pathway_cos)
if len(pathway_present) > min_pathway_size:
overlap = set(cos) & pathway_present
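            # hypergeom.sf(k, M, n, N): survival function of the hypergeometric
            # distribution with k = compounds shared by the query set and this
            # pathway, M = all compounds across pathways, n = this pathway's
            # size and N = the query set size.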
prob = hypergeom.sf(len(overlap), len(all_cos), len(pathway_present), len(set(cos)))
pathway_names.append(pathway)
pathway_data.append([len(pathway_present), len(overlap), prob])
enrichment_table = pd.DataFrame(pathway_data, index=pathway_names,
columns=["pathway size", "overlap", "probability"])
enrichment_table['adjusted probability'] = p_adjust(enrichment_table.probability)
    # Sanity check: a significantly enriched pathway with zero overlapping
    # compounds indicates something went wrong, so discard the result.
    if np.any((enrichment_table['adjusted probability'] < .05) & (enrichment_table['overlap'] == 0)):
        return None
    else:
        return enrichment_table.sort_values('adjusted probability')
def make_enrichment_clustermap(pathway_enrichment_dfs: dict, key, output_loc, min_p=.1, log=False):
enrichment_p_df = pd.DataFrame.from_dict({sample: pathway_enrichment_df[key] for sample, pathway_enrichment_df in
pathway_enrichment_dfs.items()})
enrichment_p_df = enrichment_p_df.loc[enrichment_p_df.index[(enrichment_p_df<min_p).sum(axis=1) > 0]]
enrichment_p_df = enrichment_p_df[enrichment_p_df.columns[(enrichment_p_df<min_p).sum(axis=0) > 0]]
if log:
enrichment_p_df = np.log(enrichment_p_df)
g = sns.clustermap(enrichment_p_df, col_cluster=False, figsize=(2, 12), cmap="Blues_r", method="average")
_ = plt.setp(g.ax_heatmap.get_xticklabels(), rotation=340, fontsize=12, ha="left")
_ = plt.setp(g.ax_heatmap.get_yticklabels(), rotation=0, fontsize=12)
plt.savefig(output_loc, dpi=500, bbox_inches='tight')
def main(kos_loc, output_dir, other_kos_loc=None, compounds_loc=None, name1='gene_set_1', name2='gene_set_2',
keep_separated=False, samples_are_columns=False, detected_only=False, rxn_compounds_only=False,
unique_only=True, ko_file_loc=None, rn_file_loc=None, co_file_loc=None, pathway_file_loc=None,
write_json=False):
    # create the output dir first so we fail fast if it already exists
    makedirs(output_dir)
logger = Logger(path.join(output_dir, "AMON_log.txt"))
# read in all kos and get records
sample_kos = read_in_ids(kos_loc, keep_separated=keep_separated,
samples_are_columns=samples_are_columns, name=name1)
logger['kos_loc'] = path.abspath(kos_loc)
if other_kos_loc is not None:
sample_kos.update(read_in_ids(other_kos_loc, keep_separated=keep_separated,
samples_are_columns=samples_are_columns, name=name2))
logger['other_kos_loc'] = path.abspath(other_kos_loc)
all_kos = set([value for values in sample_kos.values() for value in values])
logger['Number of samples'] = len(sample_kos)
logger['Total number of KOs'] = len(all_kos)
ko_dict = get_kegg_record_dict(set(all_kos), parse_ko, ko_file_loc)
if write_json:
open(path.join(output_dir, 'ko_dict.json'), 'w').write(json.dumps(ko_dict))
logger['KO json location'] = path.abspath(path.join(output_dir, 'ko_dict.json'))
# get all reactions from kos
sample_rns = get_rns_from_kos(sample_kos, ko_dict)
all_rns = set([value for values in sample_rns.values() for value in values])
logger['Total number of reactions'] = len(all_rns)
# get reactions from kegg
rn_dict = get_kegg_record_dict(set(all_rns), parse_rn, rn_file_loc)
if write_json:
open(path.join(output_dir, 'rn_dict.json'), 'w').write(json.dumps(rn_dict))
logger['RN json location'] = path.abspath(path.join(output_dir, 'rn_dict.json'))
# Get reactions from KEGG and pull cos produced
sample_cos_produced = get_products_from_rns(sample_rns, rn_dict)
# read in compounds that were measured if available
if compounds_loc is not None:
cos_measured = list(read_in_ids(compounds_loc, name='Compounds', keep_separated=False).values())[0]
logger['compounds_loc'] = path.abspath(compounds_loc)
else:
cos_measured = None
# make compound origin table
origin_table = make_compound_origin_table(sample_cos_produced, cos_measured)
# get rid of any all false columns
origin_table = origin_table[origin_table.columns[origin_table.sum().astype(bool)]]
origin_table.to_csv(path.join(output_dir, 'origin_table.tsv'), sep='\t')
logger['Origin table location'] = path.abspath(path.join(output_dir, 'origin_table.tsv'))
# make kegg mapper input if 2 or fewer samples
if len(sample_cos_produced) <= 2:
kegg_mapper_input = make_kegg_mapper_input(merge_dicts_of_lists(sample_kos, sample_cos_produced), cos_measured)
kegg_mapper_input.to_csv(path.join(output_dir, 'kegg_mapper.tsv'), sep='\t')
logger['KEGG mapper location'] = path.abspath(path.join(output_dir, 'kegg_mapper.tsv'))
# Get full set of compounds
all_cos_produced = set([value for values in sample_cos_produced.values() for value in values])
logger['Number of cos produced across samples'] = len(all_cos_produced)
    if detected_only:
        # include the measured compounds as well so that their KEGG records
        # are retrieved alongside the predicted compounds
        all_cos_produced = set(all_cos_produced) | set(cos_measured)
        logger['Number of cos produced or detected'] = len(all_cos_produced)
# Get compound data from kegg
co_dict = get_kegg_record_dict(all_cos_produced, parse_co, co_file_loc)
if write_json:
open(path.join(output_dir, 'co_dict.json'), 'w').write(json.dumps(co_dict))
    # remove measured compounds without reactions if required (only
    # applicable when a compounds file was given)
    if rxn_compounds_only and cos_measured is not None:
cos_with_rxn = list()
for compound, record in co_dict.items():
if 'REACTION' in record:
cos_with_rxn.append(compound)
cos_measured = set(cos_measured) & set(cos_with_rxn)
# Make venn diagram
if (compounds_loc is not None or len(sample_cos_produced) > 1) and len(sample_cos_produced) <= 2:
        make_venn(sample_cos_produced, cos_measured, path.join(output_dir, 'venn.png'),
                  name1=name1, name2=name2)
# Filter compounds down to only cos measured for cos produced and other cos produced
if detected_only:
sample_cos_produced = {sample: set(cos_produced) & set(cos_measured) for sample, cos_produced
in sample_cos_produced.items()}
# find compounds unique to microbes and to host if host included
if unique_only:
sample_cos_produced = get_unique_from_dict_of_lists(sample_cos_produced)
# Get pathway info from pathways in compounds
all_pathways = [pathway.replace('map', 'ko') for pathway in get_pathways_from_cos(co_dict)]
pathway_dict = get_kegg_record_dict(all_pathways, parse_pathway, pathway_file_loc)
pathway_to_compound_dict = get_pathway_to_co_dict(pathway_dict, no_glycan=False)
# calculate enrichment
pathway_enrichment_dfs = dict()
for sample, cos_produced in sample_cos_produced.items():
pathway_enrichment_df = calculate_enrichment(cos_produced, pathway_to_compound_dict)
if pathway_enrichment_df is not None:
pathway_enrichment_df.to_csv(path.join(output_dir, '%s_compound_pathway_enrichment.tsv' % sample), sep='\t')
            logger['%s pathway enrichment' % sample] = path.abspath(
                path.join(output_dir, '%s_compound_pathway_enrichment.tsv' % sample))
pathway_enrichment_dfs[sample] = pathway_enrichment_df
if len(pathway_enrichment_dfs) > 0:
make_enrichment_clustermap(pathway_enrichment_dfs, 'adjusted probability',
path.join(output_dir, 'enrichment_heatmap.png'))
logger['Enrichment clustermap location'] = path.abspath(path.join(output_dir, 'enrichment_heatmap.png'))
logger.output_log() | AMON-bio | /AMON-bio-1.0.0.tar.gz/AMON-bio-1.0.0/AMON/predict_metabolites.py | predict_metabolites.py |
import itertools, json, random
def result_chart_js(exam_logs_json, color_json = None) :
    '''
    Build a stacked bar chart.js configuration, returned as a JSON string,
    summarising the per-tag scores for each exam in the exam logs.

    color_json maps tag names to bar colours, e.g.
    {
        "tag1" : "#2e5355",
        "tag2" : "#c348f1"
    }
    If color_json is None, each tag is assigned a random colour.

    :param exam_logs_json: JSON string of exam logs
    :param color_json: optional mapping of tag name to hex colour
    :return: chart.js configuration as a JSON string
    '''
exam_logs_json = json.loads(exam_logs_json)
all_tag = [q['tags'] for e in exam_logs_json['exam_logs'] for q in e['exam']['questions']]
all_tag = list(itertools.chain(*all_tag))
all_tag = [x['name'] for x in all_tag]
all_tag = list(set(all_tag))
all_tag = sorted(all_tag)
prob_nums = [len(e['exam']['questions']) for e in exam_logs_json['exam_logs']]
#accs = [[cor for ans, cor in zip(t_ans['answer'], test['problems']) if ans == cor['correctanswer']] for t_ans, test in zip(student_json['test'], test_json)]
accs = []
for i, e in enumerate(exam_logs_json['exam_logs']) :
accs.append([])
for j, cor in enumerate(e['exam']['questions']) :
if j < len(e['answers']) and e['answers'][j]['id'] == cor['response']['answer']['id'] :
accs[i].append(cor)
for acc in accs :
for p in acc :
tag_list = []
for t in p['tags'] :
tag_list.append(t['name'])
p['tags'] = tag_list
print(accs)
tag_cor = {}
for t in all_tag :
acc_scores = [sum([1 / len(p['tags']) for p in acc if t in p['tags']]) * 100.0 / prob_nums[i] for i, acc in enumerate(accs)]
tag_cor[t] = acc_scores
print(tag_cor)
chart_dic = {}
chart_dic['type'] = "bar"
data_dic = {}
data_dic['labels'] = [t['exam']['name'] for t in exam_logs_json['exam_logs']]
    if color_json is None :
        data_dic['datasets'] = [{'label':t, 'data':s, 'backgroundColor':'#%06x' % random.randrange(16777216)} for t, s in tag_cor.items()]
else :
data_dic['datasets'] = [{'label':t, 'data':s, 'backgroundColor':color_json[t]} for t, s in tag_cor.items()]
chart_dic['data'] = data_dic
option_dic= {}
option_dic['scales'] = {'xAxes':[{'stacked':'true'}], 'yAxes':[{'stacked':'true', 'ticks': {'max':100}}]}
chart_dic['options'] = option_dic
chart_json = json.dumps(chart_dic)
return chart_json
if __name__ == "__main__" :
c_json = result_chart_js('{"id": 9, "exam_logs": [{"id": 39, "answers": [{"id": 4, "text": "\\u314b"}, {"id": 31, "text": "\\u3131, \\u3139"}, {"id": 36, "text": "21"}, {"id": 41, "text": "top down"}], "exam": {"id": 3, "questions": [{"id": 6, "response": {"id": 5, "choices": [{"id": 2, "text": "\\u3148"}, {"id": 3, "text": "\\u314a"}, {"id": 4, "text": "\\u314b"}, {"id": 5, "text": "\\u314c"}, {"id": 25, "text": "\\u3145"}], "answer": {"id": 2, "text": "\\u3148"}, "polymorphic_ctype": 16, "resourcetype": "UniqueAnswerResponse"}, "context_block": {"id": 5, "blocks": [{"id": 5, "text": "\\uac00\\uc744\\uc774\\ub294 \\ubc31\\ud654\\uc810 \\uc548\\uc5d0 \\uc788\\ub294 \\uc601\\ud654\\uad00\\uc5d0\\uc11c \\uc601\\ud654\\ub97c \\ubcf4\\ub824\\uace0 \\ud569\\ub2c8\\ub2e4. \\ub2e4\\uc74c\\uacfc \\uac19\\uc740 \\uc21c\\uc11c\\ub85c \\uc7a5\\uc18c\\ub97c \\ucc3e\\uc744 \\uc218 \\uc788\\ub2e4.\\r\\n\\r\\n\\ubc31\\ud654\\uc810 \\ub0b4\\uc5d0 \\uc788\\ub294 \\uc601\\ud654\\uad00\\uc744 \\ucc3e\\uae30 \\uc704\\ud55c \\ubc29\\ubc95\\uc740 \\ub2e4\\uc74c\\uacfc \\uac19\\ub2e4.\\r\\n\\ubc31\\ud654\\uc810 1\\uce35\\uc5d0 \\uc788\\ub294 iMAP\\uc73c\\ub85c \\uac04\\ub2e4. \\uadf8 \\ub2e4\\uc74c \\uac00\\uace0\\uc790 \\ud558\\ub294 \\uc7a5\\uc18c\\ub97c \\uac80\\uc0c9\\ud55c\\ub2e4. \\r\\niMAP\\uc5d0\\uc11c \\uc7a5\\uc18c\\uc5d0 \\ub300\\ud55c \\uc704\\uce58\\uc815\\ubcf4\\ub97c 2\\uac00\\uc9c0 \\ud78c\\ud2b8\\ub85c \\uc81c\\uacf5\\ub41c\\ub2e4. \\r\\n\\r\\n\\uccab \\ubc88\\uc9f8 \\ud78c\\ud2b8\\ub294 \\uc7a5\\uc18c\\uac00 \\uba87 \\uce35\\uc5d0 \\uc788\\ub294\\uc9c0\\ub97c \\ud55c\\uae00\\ub85c \\uc54c\\ub824\\uc8fc\\uace0,\\r\\n\\ub450 \\ubc88\\uc9f8 \\ud78c\\ud2b8\\ub294 \\uc5b4\\ub290 \\uc704\\uce58\\uc5d0 \\uc788\\ub294\\uc9c0\\ub97c \\uc22b\\uc790\\uc815\\ubcf4\\ub85c \\uc54c\\ub824\\uc900\\ub2e4.\\r\\n\\r\\n\\uc608\\ub97c \\ub4e4\\uc5b4, \\u3131\\uc774\\uba74 1\\uce35\\uc774\\uace0 \\u3141\\uc774\\uba74 5\\uce35\\uc744 \\ub098\\ud0c0\\ub0b8\\ub2e4.\\r\\n\\r\\n*\\ud55c\\uae00 \\uc21c\\uc11c: \\u3131 \\u3134 \\u3137 \\u3139 \\u3141 \\u3142 \\u3145 \\u3147 \\u3148 \\u314a \\u314b \\u314c \\u314d \\u314e\\r\\n*\\uc601\\ubb38 \\uc21c\\uc11c: A B C D E F G H I J K L M N O P Q R S T U V W X Y Z\\r\\n\\r\\n\\r\\n\\uc601\\ud654\\uad00\\uc5d0 \\ub300\\ud55c iMAP\\uc758 \\uc704\\uce58 \\uc815\\ubcf4\\uac00 \\ub2e4\\uc74c\\uacfc \\uac19\\uc744 \\ub54c, \\uc601\\ud654\\uad00\\uc740 \\uba87 \\uce35\\uc5d0 \\uc788\\uc2b5\\ub2c8\\uae4c?\\r\\n\\r\\n[ \\uc601\\ud654\\uad00: \\u3137 + \\u3147 - \\u3131 ]", "polymorphic_ctype": 15, "resourcetype": "TextBlock"}]}, "tags": [{"id": 1, "name": "\\ubd84\\uc11d\\ub2a5\\ub825"}], "name": "among_middle1"}, {"id": 7, "response": {"id": 6, "choices": [{"id": 29, "text": "\\u3131, \\u3134"}, {"id": 30, "text": "\\u3131, \\u3137"}, {"id": 31, "text": "\\u3131, \\u3139"}, {"id": 32, "text": "\\u3131, \\u3141"}, {"id": 33, "text": "\\u3131, \\u3134, \\u3137, \\u3139, \\u3141"}], "answer": {"id": 31, "text": "\\u3131, \\u3139"}, "polymorphic_ctype": 16, "resourcetype": "UniqueAnswerResponse"}, "context_block": {"id": 6, "blocks": [{"id": 6, "text": "\\uac00\\uc744\\uc774\\ub294 \\uc601\\ud654\\ub97c \\ub2e4 \\ubcf8 \\ud6c4 \\uc5b4\\uba38\\ub2c8\\uc640 \\uc2dd\\ud488\\uad00\\uc5d0\\uc11c \\ub9cc\\ub098\\uae30\\ub85c \\ud588\\ub2e4. \\ub2e4\\uc74c\\uc740 \\uac00\\uc744\\uc774\\uac00 \\uc2dd\\ud488\\uad00\\uc5d0 \\uac00\\uae30 \\uc704\\ud574\\uc11c \\uace0\\ub824\\ud574\\uc57c \\ud560 \\uc804\\uc81c\\uc870\\uac74\\uc5d0 \\ub300\\ud574 \\ub098\\uc5f4\\ud55c \\uac83\\uc774\\ub2e4. 
\\ubc18\\ub4dc\\uc2dc \\ud544\\uc694\\ud55c \\uc870\\uac74\\uc73c\\ub85c\\ub9cc \\ubb36\\uc778 \\uac83\\uc740? \\r\\n\\r\\n\\r\\n\\u3131. iMAP\\uc5d0\\uc11c \\uc2dd\\ud488\\uad00\\uc744 \\uac80\\uc0c9\\ud588\\ub294\\uac00?\\r\\n\\u3134. iMAP\\uc774 1\\uce35 \\uc678\\uc5d0 \\ub2e4\\ub978 \\uacf3\\uc5d0\\ub3c4 \\uc788\\ub294\\uac00?\\r\\n\\u3137. \\uc5b4\\uba38\\ub2c8\\ub294 \\uc2dd\\ud488\\uad00 \\uc911 \\uc5b4\\ub290 \\ucf54\\ub108\\uc5d0 \\uc788\\ub294\\uac00?\\r\\n\\u3139. \\uc704\\uce58 \\uc815\\ubcf4\\uc5d0 \\ub300\\ud55c \\ud55c\\uae00 \\ud78c\\ud2b8\\uac00 \\uc8fc\\uc5b4\\uc84c\\ub294\\uac00?\\r\\n\\u3141. \\uc5d8\\ub9ac\\ubca0\\uc774\\ud130 \\uc791\\ub3d9\\uc740 \\uc6d0\\ud65c\\ud55c\\uac00?", "polymorphic_ctype": 15, "resourcetype": "TextBlock"}]}, "tags": [{"id": 2, "name": "\\ubaa8\\ub378\\ub9c1 \\ub2a5\\ub825"}], "name": "among_middle2"}, {"id": 8, "response": {"id": 7, "choices": [{"id": 34, "text": "3"}, {"id": 35, "text": "20"}, {"id": 36, "text": "21"}, {"id": 37, "text": "22"}, {"id": 38, "text": "23"}], "answer": {"id": 37, "text": "22"}, "polymorphic_ctype": 16, "resourcetype": "UniqueAnswerResponse"}, "context_block": {"id": 7, "blocks": [{"id": 7, "text": "\\ub2e4\\uc74c\\uacfc \\uac19\\uc740 \\uc77c\\ub828\\uc758 \\uc21c\\uc11c\\ub85c \\uc2dd\\ud488\\uad00\\uc744 \\ucc3e\\uc73c\\ub824\\uace0 \\ud55c\\ub2e4. \\r\\n\\r\\n1) iMAP\\uc73c\\ub85c \\uac00\\uc11c \\uc7a5\\uc18c\\ub97c \\uac80\\uc0c9\\ud55c\\ub2e4.\\r\\n \\u2193\\r\\n2) \\uc704\\uce58\\uc815\\ubcf4\\ub97c \\ud655\\uc778\\ud55c\\ub2e4.\\r\\n \\u2193\\r\\n3) \\ud55c\\uae00 \\ud78c\\ud2b8\\uc5d0 \\ub300\\ud55c \\uc815\\ubcf4\\ub97c \\ud574\\ub3c5\\ud558\\uc5ec \\uc2dd\\ud488\\uad00\\uc774 \\uba87 \\uce35\\uc778\\uc9c0 \\ud655\\uc778\\ud55c\\ub2e4.\\r\\n(\\ub2e8, \\uac01 \\uae00\\uc790\\ub294 \\u3131\\ubd80\\ud130 \\uc2dc\\uc791\\ud558\\uc5ec \\ubc18\\ubcf5\\ud69f\\uc218\\ub97c \\uacc4\\uc0b0\\ud55c\\ub2e4. \\uc608\\ub97c \\ub4e4\\uc5b4, \\u3137\\uc758 \\uacbd\\uc6b0 \\ubc18\\ubcf5\\ud69f\\uc218\\ub294 3\\ubc88\\uc774\\ub2e4.)\\r\\n*\\ud55c\\uae00 \\uc21c\\uc11c: \\u3131 \\u3134 \\u3137 \\u3139 \\u3141 \\u3142 \\u3145 \\u3147 \\u3148 \\u314a \\u314b \\u314c \\u314d \\u314e\\r\\n \\u2193\\r\\n4) \\uc2dd\\ud488\\uad00\\uc758 \\uc704\\uce58\\ub97c \\ud655\\uc778\\ud558\\uc5ec \\uc7a5\\uc18c\\ub97c \\uc774\\ub3d9\\ud55c\\ub2e4.\\r\\n\\r\\n\\r\\n\\uc2dd\\ud488\\uad00\\uc758 \\uc704\\uce58\\uc5d0 \\ub300\\ud55c iMAP\\uc758 \\ud55c\\uae00 \\ud78c\\ud2b8 \\uc815\\ubcf4\\uac00 \\ub2e4\\uc74c\\uacfc \\uac19\\uc744 \\ub54c, \\uacfc\\uc815 3)\\uc744 \\uba87 \\ubc88 \\ubc18\\ubcf5\\ud574\\uc57c \\ud560\\uae4c?\\r\\n\\r\\n[ \\uc2dd\\ud488\\uad00: \\u3145 + \\u3141 - \\u314a ]", "polymorphic_ctype": 15, "resourcetype": "TextBlock"}]}, "tags": [{"id": 4, "name": "\\uad6c\\ud604\\ub2a5\\ub825"}], "name": "among_middle3"}, {"id": 9, "response": {"id": 8, "choices": [{"id": 39, "text": "bottom up"}, {"id": 40, "text": "top up up"}, {"id": 41, "text": "top down"}, {"id": 42, "text": "top left"}, {"id": 43, "text": "right up"}], "answer": {"id": 43, "text": "right up"}, "polymorphic_ctype": 16, "resourcetype": "UniqueAnswerResponse"}, "context_block": {"id": 8, "blocks": [{"id": 8, "text": "\\uac00\\uc744\\uc774 \\uc5b4\\uba38\\ub2c8\\uac00 \\uc2dd\\ud488\\uad00 \\uc911 \\uacfc\\uc77c\\ucf54\\ub108\\uc5d0 \\uc788\\ub2e4\\uace0 \\ud560 \\ub54c, \\uacfc\\uc77c\\ucf54\\ub108\\ub85c \\uac00\\uae30 \\uc704\\ud574\\uc11c\\ub294 \\ub2e4\\uc74c\\uc758 \\uc554\\ud638\\ub97c \\ud574\\ub3c5\\ud574\\uc57c \\ud55c\\ub2e4. 
\\r\\n\\r\\n[ 18978202116 ]\\r\\n\\r\\n*\\uc601\\ubb38 \\uc21c\\uc11c: A B C D E F G H I J K L M N O P Q R S T U V W X Y Z\\r\\n\\r\\n\\uc554\\ud638\\uc758 \\uc22b\\uc790\\ub294 \\uac01\\uac01 \\ucd1d 7\\uac1c\\uc758 \\uc601\\ubb38\\uc790\\uc5d0 \\ub300\\uc751\\ub418\\uace0 1,5,6,7\\ubc88\\uc9f8 \\ubb38\\uc790\\ub294 2\\uc790\\ub9ac \\uc218\\ub85c \\ub098\\ud0c0\\ub09c\\ub2e4. \\ub2e4\\uc74c \\uc911 \\uacfc\\uc77c\\ucf54\\ub108\\uc758 \\uc704\\uce58\\ub294 \\uc5b4\\ub514\\uc77c\\uae4c?", "polymorphic_ctype": 15, "resourcetype": "TextBlock"}]}, "tags": [{"id": 3, "name": "\\uc77c\\ubc18\\ud654 \\ub2a5\\ub825"}], "name": "among_middle4"}], "name": "middle"}}, {"id": 52, "answers": [{"id": 3, "text": "\\u314a"}], "exam": {"id": 1, "questions": [{"id": 1, "response": {"id": 1, "choices": [{"id": 2, "text": "\\u3148"}, {"id": 3, "text": "\\u314a"}, {"id": 4, "text": "\\u314b"}, {"id": 5, "text": "\\u314c"}, {"id": 6, "text": "\\u314d"}], "answer": {"id": 3, "text": "\\u314a"}, "polymorphic_ctype": 16, "resourcetype": "UniqueAnswerResponse"}, "context_block": {"id": 1, "blocks": [{"id": 1, "text": "\\uac00\\uc744\\uc774\\ub294 \\ubc31\\ud654\\uc810 \\uc548\\uc5d0 \\uc788\\ub294 \\uc601\\ud654\\uad00\\uc5d0\\uc11c \\uc601\\ud654\\ub97c \\ubcf4\\ub824\\uace0 \\ud569\\ub2c8\\ub2e4. \\ub2e4\\uc74c\\uacfc \\uac19\\uc740 \\uc21c\\uc11c\\ub85c \\uc7a5\\uc18c\\ub97c \\ucc3e\\uc744 \\uc218 \\uc788\\ub2e4.\\r\\n\\r\\n\\ubc31\\ud654\\uc810 \\ub0b4\\uc5d0 \\uc788\\ub294 \\uc601\\ud654\\uad00\\uc744 \\ucc3e\\uae30 \\uc704\\ud55c \\ubc29\\ubc95\\uc740 \\ub2e4\\uc74c\\uacfc \\uac19\\ub2e4.\\r\\n\\ubc31\\ud654\\uc810 1\\uce35\\uc5d0 \\uc788\\ub294 iMAP\\uc73c\\ub85c \\uac04\\ub2e4. \\uadf8 \\ub2e4\\uc74c \\uac00\\uace0\\uc790 \\ud558\\ub294 \\uc7a5\\uc18c\\ub97c \\uac80\\uc0c9\\ud55c\\ub2e4. \\r\\niMAP\\uc5d0\\uc11c \\uc7a5\\uc18c\\uc5d0 \\ub300\\ud55c \\uc704\\uce58\\uc815\\ubcf4\\ub97c 2\\uac00\\uc9c0 \\ud78c\\ud2b8\\ub85c \\uc81c\\uacf5\\ub41c\\ub2e4. \\r\\n\\r\\n\\uccab \\ubc88\\uc9f8 \\ud78c\\ud2b8\\ub294 \\uc7a5\\uc18c\\uac00 \\uba87 \\uce35\\uc5d0 \\uc788\\ub294\\uc9c0\\ub97c \\ud55c\\uae00\\ub85c \\uc54c\\ub824\\uc8fc\\uace0,\\r\\n\\ub450 \\ubc88\\uc9f8 \\ud78c\\ud2b8\\ub294 \\uc5b4\\ub290 \\uc704\\uce58\\uc5d0 \\uc788\\ub294\\uc9c0\\ub97c \\uc22b\\uc790\\uc815\\ubcf4\\ub85c \\uc54c\\ub824\\uc900\\ub2e4.\\r\\n\\r\\n\\uc608\\ub97c \\ub4e4\\uc5b4, \\u3131\\uc774\\uba74 1\\uce35\\uc774\\uace0 \\u3141\\uc774\\uba74 5\\uce35\\uc744 \\ub098\\ud0c0\\ub0b8\\ub2e4.\\r\\n\\ud55c\\uae00\\uacfc \\ub3d9\\uc77c\\ud558\\uac8c A\\uc774\\uba74 1\\uce35\\uc774\\uace0 E\\uc774\\uba74 5\\uce35\\uc744 \\ub098\\ud0c0\\ub0b8\\ub2e4.\\r\\n\\r\\n*\\ud55c\\uae00 \\uc21c\\uc11c: \\u3131 \\u3134 \\u3137 \\u3139 \\u3141 \\u3142 \\u3145 \\u3147 \\u3148 \\u314a \\u314b \\u314c \\u314d \\u314e\\r\\n*\\uc601\\ubb38 \\uc21c\\uc11c: A B C D E F G H I J K L M N O P Q R S T U V W X Y Z\\r\\n\\r\\n\\uc601\\ud654\\uad00\\uc5d0 \\ub300\\ud55c iMAP\\uc758 \\uc704\\uce58 \\uc815\\ubcf4\\uac00 \\ub2e4\\uc74c\\uacfc \\uac19\\uc744 \\ub54c, \\uc601\\ud654\\uad00\\uc740 \\uba87 \\uce35\\uc5d0 \\uc788\\uc2b5\\ub2c8\\uae4c? 
\\r\\n\\r\\n\\r\\n[ \\uc601\\ud654\\uad00 : \\u3137 + \\u3147 ]", "polymorphic_ctype": 15, "resourcetype": "TextBlock"}]}, "tags": [{"id": 1, "name": "\\ubd84\\uc11d\\ub2a5\\ub825"}], "name": "among_elementary1"}, {"id": 2, "response": {"id": 2, "choices": [{"id": 1, "text": "\\u3131, \\u3134"}, {"id": 7, "text": "\\u3131, \\u3134, \\u3137, \\u3139, \\u3141"}, {"id": 8, "text": "\\u3131, \\u3134, \\u3137"}, {"id": 9, "text": "\\u3131, \\u3134, \\u3139"}, {"id": 10, "text": "\\u3131, \\u3134, \\u3141"}], "answer": {"id": 9, "text": "\\u3131, \\u3134, \\u3139"}, "polymorphic_ctype": 16, "resourcetype": "UniqueAnswerResponse"}, "context_block": {"id": 2, "blocks": [{"id": 2, "text": "\\uac00\\uc744\\uc774\\ub294 \\uc601\\ud654\\ub97c \\ub2e4 \\ubcf8 \\ud6c4 \\uc5b4\\uba38\\ub2c8\\uc640 \\uc2dd\\ud488\\uad00\\uc5d0\\uc11c \\ub9cc\\ub098\\uae30\\ub85c \\ud588\\ub2e4. \\ub2e4\\uc74c\\uc740 \\uac00\\uc744\\uc774\\uac00 \\uc2dd\\ud488\\uad00\\uc5d0 \\uac00\\uae30 \\uc704\\ud574\\uc11c \\uace0\\ub824\\ud574\\uc57c \\ud560 \\uc804\\uc81c\\uc870\\uac74\\uc5d0 \\ub300\\ud574 \\ub098\\uc5f4\\ud55c \\uac83\\uc774\\ub2e4. \\ubc18\\ub4dc\\uc2dc \\ud544\\uc694\\ud55c \\uc870\\uac74\\uc73c\\ub85c\\ub9cc \\ubb36\\uc778 \\uac83\\uc740? \\r\\n\\r\\n\\r\\n\\r\\n\\u3131. \\uc5b4\\ub290 \\uc7a5\\uc18c\\ub97c \\uac00\\uc57c\\ud558\\ub294\\uc9c0 \\uc54c\\uace0\\uc788\\ub294\\uac00?\\r\\n\\u3134. iMAP\\uc774 \\uc5b4\\ub514\\uc788\\ub294\\uc9c0 \\uc54c\\uace0\\uc788\\ub294\\uac00?\\r\\n\\u3137. \\ubc31\\ud654\\uc810 \\uc6b4\\uc601\\uc2dc\\uac04\\uc744 \\uc54c\\uace0\\uc788\\ub294\\uac00?\\r\\n\\u3139. \\uc704\\uce58 \\uc815\\ubcf4\\uc5d0 \\ub300\\ud55c \\ud78c\\ud2b8\\ub97c \\ud574\\ub3c5\\ud560 \\uc218 \\uc788\\ub294\\uac00?\\r\\n\\u3141. \\uc5d8\\ub9ac\\ubca0\\uc774\\ud130 \\uc791\\ub3d9\\uc740 \\uc6d0\\ud65c\\ud55c\\uac00?", "polymorphic_ctype": 15, "resourcetype": "TextBlock"}]}, "tags": [{"id": 2, "name": "\\ubaa8\\ub378\\ub9c1 \\ub2a5\\ub825"}], "name": "among_elementary2"}, {"id": 3, "response": {"id": 3, "choices": [{"id": 11, "text": "2"}, {"id": 12, "text": "5"}, {"id": 13, "text": "7"}, {"id": 14, "text": "12"}, {"id": 15, "text": "13"}], "answer": {"id": 14, "text": "12"}, "polymorphic_ctype": 16, "resourcetype": "UniqueAnswerResponse"}, "context_block": {"id": 3, "blocks": [{"id": 3, "text": "\\ub2e4\\uc74c\\uacfc \\uac19\\uc740 \\uc77c\\ub828\\uc758 \\uc21c\\uc11c\\ub85c \\uc2dd\\ud488\\uad00\\uc744 \\ucc3e\\uc73c\\ub824\\uace0 \\ud55c\\ub2e4.\\r\\n\\r\\n\\r\\n1) iMAP\\uc73c\\ub85c \\uac00\\uc11c \\uc7a5\\uc18c\\ub97c \\uac80\\uc0c9\\ud55c\\ub2e4.\\r\\n \\u2193\\r\\n2) \\uc704\\uce58\\uc815\\ubcf4\\ub97c \\ud655\\uc778\\ud55c\\ub2e4.\\r\\n \\u2193\\r\\n3) \\ud55c\\uae00 \\ud78c\\ud2b8\\uc5d0 \\ub300\\ud55c \\uc815\\ubcf4\\ub97c \\ud574\\ub3c5\\ud558\\uc5ec \\uc2dd\\ud488\\uad00\\uc774 \\uba87 \\uce35\\uc778\\uc9c0 \\ud655\\uc778\\ud55c\\ub2e4.\\r\\n(\\ub2e8, \\uac01 \\uae00\\uc790\\ub294 \\u3131\\ubd80\\ud130 \\uc2dc\\uc791\\ud558\\uc5ec \\ubc18\\ubcf5\\ud69f\\uc218\\ub97c \\uacc4\\uc0b0\\ud55c\\ub2e4. 
\\uc608\\ub97c \\ub4e4\\uc5b4, \\u3137\\uc758 \\uacbd\\uc6b0 \\ubc18\\ubcf5\\ud69f\\uc218\\ub294 3\\ubc88\\uc774\\ub2e4.)\\r\\n*\\ud55c\\uae00 \\uc21c\\uc11c: \\u3131 \\u3134 \\u3137 \\u3139 \\u3141 \\u3142 \\u3145 \\u3147 \\u3148 \\u314a \\u314b \\u314c \\u314d \\u314e\\r\\n \\u2193\\r\\n4) \\uc2dd\\ud488\\uad00\\uc758 \\uc704\\uce58\\ub97c \\ud655\\uc778\\ud558\\uc5ec \\uc7a5\\uc18c\\ub97c \\uc774\\ub3d9\\ud55c\\ub2e4.\\r\\n\\r\\n\\r\\n\\r\\n\\uc2dd\\ud488\\uad00\\uc758 \\uc704\\uce58\\uc5d0 \\ub300\\ud55c iMAP\\uc758 \\ud55c\\uae00 \\ud78c\\ud2b8 \\uc815\\ubcf4\\uac00 \\ub2e4\\uc74c\\uacfc \\uac19\\uc744 \\ub54c, \\uacfc\\uc815 3)\\uc744 \\uba87 \\ubc88 \\ubc18\\ubcf5\\ud574\\uc57c \\ud560\\uae4c? \\r\\n\\r\\n[ \\uc2dd\\ud488\\uad00: \\u3145 + \\u3141]", "polymorphic_ctype": 15, "resourcetype": "TextBlock"}]}, "tags": [{"id": 4, "name": "\\uad6c\\ud604\\ub2a5\\ub825"}], "name": "among_elementary3"}, {"id": 4, "response": {"id": 4, "choices": [{"id": 16, "text": "bottom up"}, {"id": 17, "text": "top up up"}, {"id": 18, "text": "top down"}, {"id": 19, "text": "top left"}, {"id": 20, "text": "right up"}], "answer": {"id": 20, "text": "right up"}, "polymorphic_ctype": 16, "resourcetype": "UniqueAnswerResponse"}, "context_block": {"id": 4, "blocks": [{"id": 4, "text": "\\uac00\\uc744\\uc774 \\uc5b4\\uba38\\ub2c8\\uac00 \\uc2dd\\ud488\\uad00 \\uc911 \\uacfc\\uc77c\\ucf54\\ub108\\uc5d0 \\uc788\\ub2e4\\uace0 \\ud560 \\ub54c, \\uacfc\\uc77c\\ucf54\\ub108\\ub85c \\uac00\\uae30 \\uc704\\ud574\\uc11c\\ub294 \\ub2e4\\uc74c\\uc758 \\uc554\\ud638\\ub97c \\ud574\\ub3c5\\ud574\\uc57c \\ud55c\\ub2e4. \\r\\n\\r\\n[ 18090708202116 ]\\r\\n\\r\\n*\\uc601\\ubb38 \\uc21c\\uc11c: A B C D E F G H I J K L M N O P Q R S T U V W X Y Z\\r\\n\\r\\n\\uc554\\ud638\\uc758 \\uc22b\\uc790\\ub294 \\ub450 \\uc790\\ub9ac\\uc529 \\ub04a\\uc5b4\\uc11c \\ud574\\ub3c5\\ud558\\uace0 \\uac01\\uac01 1\\uac1c\\uc758 \\uc601\\ubb38\\uc790\\uc5d0 \\ub300\\uc751\\ub41c\\ub2e4. \\uc554\\ud638\\uac00 \\ucd1d 7\\uac1c\\uc758 \\uc601\\ubb38\\uc790\\ub85c \\uc774\\ub8e8\\uc5b4\\uc84c\\ub2e4\\uace0 \\ud560 \\ub54c, \\ub2e4\\uc74c \\uc911 \\uacfc\\uc77c\\ucf54\\ub108\\uc758 \\uc704\\uce58\\ub294 \\uc5b4\\ub514\\uc77c\\uae4c?", "polymorphic_ctype": 15, "resourcetype": "TextBlock"}]}, "tags": [{"id": 3, "name": "\\uc77c\\ubc18\\ud654 \\ub2a5\\ub825"}], "name": "among_elementary4"}], "name": "elementary"}}, {"id": 53, "answers": [{"id": 3, "text": "\\u314a"}, {"id": 9, "text": "\\u3131, \\u3134, \\u3139"}, {"id": 13, "text": "7"}, {"id": 18, "text": "top down"}], "exam": {"id": 1, "questions": [{"id": 1, "response": {"id": 1, "choices": [{"id": 2, "text": "\\u3148"}, {"id": 3, "text": "\\u314a"}, {"id": 4, "text": "\\u314b"}, {"id": 5, "text": "\\u314c"}, {"id": 6, "text": "\\u314d"}], "answer": {"id": 3, "text": "\\u314a"}, "polymorphic_ctype": 16, "resourcetype": "UniqueAnswerResponse"}, "context_block": {"id": 1, "blocks": [{"id": 1, "text": "\\uac00\\uc744\\uc774\\ub294 \\ubc31\\ud654\\uc810 \\uc548\\uc5d0 \\uc788\\ub294 \\uc601\\ud654\\uad00\\uc5d0\\uc11c \\uc601\\ud654\\ub97c \\ubcf4\\ub824\\uace0 \\ud569\\ub2c8\\ub2e4. \\ub2e4\\uc74c\\uacfc \\uac19\\uc740 \\uc21c\\uc11c\\ub85c \\uc7a5\\uc18c\\ub97c \\ucc3e\\uc744 \\uc218 \\uc788\\ub2e4.\\r\\n\\r\\n\\ubc31\\ud654\\uc810 \\ub0b4\\uc5d0 \\uc788\\ub294 \\uc601\\ud654\\uad00\\uc744 \\ucc3e\\uae30 \\uc704\\ud55c \\ubc29\\ubc95\\uc740 \\ub2e4\\uc74c\\uacfc \\uac19\\ub2e4.\\r\\n\\ubc31\\ud654\\uc810 1\\uce35\\uc5d0 \\uc788\\ub294 iMAP\\uc73c\\ub85c \\uac04\\ub2e4. 
\\uadf8 \\ub2e4\\uc74c \\uac00\\uace0\\uc790 \\ud558\\ub294 \\uc7a5\\uc18c\\ub97c \\uac80\\uc0c9\\ud55c\\ub2e4. \\r\\niMAP\\uc5d0\\uc11c \\uc7a5\\uc18c\\uc5d0 \\ub300\\ud55c \\uc704\\uce58\\uc815\\ubcf4\\ub97c 2\\uac00\\uc9c0 \\ud78c\\ud2b8\\ub85c \\uc81c\\uacf5\\ub41c\\ub2e4. \\r\\n\\r\\n\\uccab \\ubc88\\uc9f8 \\ud78c\\ud2b8\\ub294 \\uc7a5\\uc18c\\uac00 \\uba87 \\uce35\\uc5d0 \\uc788\\ub294\\uc9c0\\ub97c \\ud55c\\uae00\\ub85c \\uc54c\\ub824\\uc8fc\\uace0,\\r\\n\\ub450 \\ubc88\\uc9f8 \\ud78c\\ud2b8\\ub294 \\uc5b4\\ub290 \\uc704\\uce58\\uc5d0 \\uc788\\ub294\\uc9c0\\ub97c \\uc22b\\uc790\\uc815\\ubcf4\\ub85c \\uc54c\\ub824\\uc900\\ub2e4.\\r\\n\\r\\n\\uc608\\ub97c \\ub4e4\\uc5b4, \\u3131\\uc774\\uba74 1\\uce35\\uc774\\uace0 \\u3141\\uc774\\uba74 5\\uce35\\uc744 \\ub098\\ud0c0\\ub0b8\\ub2e4.\\r\\n\\ud55c\\uae00\\uacfc \\ub3d9\\uc77c\\ud558\\uac8c A\\uc774\\uba74 1\\uce35\\uc774\\uace0 E\\uc774\\uba74 5\\uce35\\uc744 \\ub098\\ud0c0\\ub0b8\\ub2e4.\\r\\n\\r\\n*\\ud55c\\uae00 \\uc21c\\uc11c: \\u3131 \\u3134 \\u3137 \\u3139 \\u3141 \\u3142 \\u3145 \\u3147 \\u3148 \\u314a \\u314b \\u314c \\u314d \\u314e\\r\\n*\\uc601\\ubb38 \\uc21c\\uc11c: A B C D E F G H I J K L M N O P Q R S T U V W X Y Z\\r\\n\\r\\n\\uc601\\ud654\\uad00\\uc5d0 \\ub300\\ud55c iMAP\\uc758 \\uc704\\uce58 \\uc815\\ubcf4\\uac00 \\ub2e4\\uc74c\\uacfc \\uac19\\uc744 \\ub54c, \\uc601\\ud654\\uad00\\uc740 \\uba87 \\uce35\\uc5d0 \\uc788\\uc2b5\\ub2c8\\uae4c? \\r\\n\\r\\n\\r\\n[ \\uc601\\ud654\\uad00 : \\u3137 + \\u3147 ]", "polymorphic_ctype": 15, "resourcetype": "TextBlock"}]}, "tags": [{"id": 1, "name": "\\ubd84\\uc11d\\ub2a5\\ub825"}], "name": "among_elementary1"}, {"id": 2, "response": {"id": 2, "choices": [{"id": 1, "text": "\\u3131, \\u3134"}, {"id": 7, "text": "\\u3131, \\u3134, \\u3137, \\u3139, \\u3141"}, {"id": 8, "text": "\\u3131, \\u3134, \\u3137"}, {"id": 9, "text": "\\u3131, \\u3134, \\u3139"}, {"id": 10, "text": "\\u3131, \\u3134, \\u3141"}], "answer": {"id": 9, "text": "\\u3131, \\u3134, \\u3139"}, "polymorphic_ctype": 16, "resourcetype": "UniqueAnswerResponse"}, "context_block": {"id": 2, "blocks": [{"id": 2, "text": "\\uac00\\uc744\\uc774\\ub294 \\uc601\\ud654\\ub97c \\ub2e4 \\ubcf8 \\ud6c4 \\uc5b4\\uba38\\ub2c8\\uc640 \\uc2dd\\ud488\\uad00\\uc5d0\\uc11c \\ub9cc\\ub098\\uae30\\ub85c \\ud588\\ub2e4. \\ub2e4\\uc74c\\uc740 \\uac00\\uc744\\uc774\\uac00 \\uc2dd\\ud488\\uad00\\uc5d0 \\uac00\\uae30 \\uc704\\ud574\\uc11c \\uace0\\ub824\\ud574\\uc57c \\ud560 \\uc804\\uc81c\\uc870\\uac74\\uc5d0 \\ub300\\ud574 \\ub098\\uc5f4\\ud55c \\uac83\\uc774\\ub2e4. \\ubc18\\ub4dc\\uc2dc \\ud544\\uc694\\ud55c \\uc870\\uac74\\uc73c\\ub85c\\ub9cc \\ubb36\\uc778 \\uac83\\uc740? \\r\\n\\r\\n\\r\\n\\r\\n\\u3131. \\uc5b4\\ub290 \\uc7a5\\uc18c\\ub97c \\uac00\\uc57c\\ud558\\ub294\\uc9c0 \\uc54c\\uace0\\uc788\\ub294\\uac00?\\r\\n\\u3134. iMAP\\uc774 \\uc5b4\\ub514\\uc788\\ub294\\uc9c0 \\uc54c\\uace0\\uc788\\ub294\\uac00?\\r\\n\\u3137. \\ubc31\\ud654\\uc810 \\uc6b4\\uc601\\uc2dc\\uac04\\uc744 \\uc54c\\uace0\\uc788\\ub294\\uac00?\\r\\n\\u3139. \\uc704\\uce58 \\uc815\\ubcf4\\uc5d0 \\ub300\\ud55c \\ud78c\\ud2b8\\ub97c \\ud574\\ub3c5\\ud560 \\uc218 \\uc788\\ub294\\uac00?\\r\\n\\u3141. 
\\uc5d8\\ub9ac\\ubca0\\uc774\\ud130 \\uc791\\ub3d9\\uc740 \\uc6d0\\ud65c\\ud55c\\uac00?", "polymorphic_ctype": 15, "resourcetype": "TextBlock"}]}, "tags": [{"id": 2, "name": "\\ubaa8\\ub378\\ub9c1 \\ub2a5\\ub825"}], "name": "among_elementary2"}, {"id": 3, "response": {"id": 3, "choices": [{"id": 11, "text": "2"}, {"id": 12, "text": "5"}, {"id": 13, "text": "7"}, {"id": 14, "text": "12"}, {"id": 15, "text": "13"}], "answer": {"id": 14, "text": "12"}, "polymorphic_ctype": 16, "resourcetype": "UniqueAnswerResponse"}, "context_block": {"id": 3, "blocks": [{"id": 3, "text": "\\ub2e4\\uc74c\\uacfc \\uac19\\uc740 \\uc77c\\ub828\\uc758 \\uc21c\\uc11c\\ub85c \\uc2dd\\ud488\\uad00\\uc744 \\ucc3e\\uc73c\\ub824\\uace0 \\ud55c\\ub2e4.\\r\\n\\r\\n\\r\\n1) iMAP\\uc73c\\ub85c \\uac00\\uc11c \\uc7a5\\uc18c\\ub97c \\uac80\\uc0c9\\ud55c\\ub2e4.\\r\\n \\u2193\\r\\n2) \\uc704\\uce58\\uc815\\ubcf4\\ub97c \\ud655\\uc778\\ud55c\\ub2e4.\\r\\n \\u2193\\r\\n3) \\ud55c\\uae00 \\ud78c\\ud2b8\\uc5d0 \\ub300\\ud55c \\uc815\\ubcf4\\ub97c \\ud574\\ub3c5\\ud558\\uc5ec \\uc2dd\\ud488\\uad00\\uc774 \\uba87 \\uce35\\uc778\\uc9c0 \\ud655\\uc778\\ud55c\\ub2e4.\\r\\n(\\ub2e8, \\uac01 \\uae00\\uc790\\ub294 \\u3131\\ubd80\\ud130 \\uc2dc\\uc791\\ud558\\uc5ec \\ubc18\\ubcf5\\ud69f\\uc218\\ub97c \\uacc4\\uc0b0\\ud55c\\ub2e4. \\uc608\\ub97c \\ub4e4\\uc5b4, \\u3137\\uc758 \\uacbd\\uc6b0 \\ubc18\\ubcf5\\ud69f\\uc218\\ub294 3\\ubc88\\uc774\\ub2e4.)\\r\\n*\\ud55c\\uae00 \\uc21c\\uc11c: \\u3131 \\u3134 \\u3137 \\u3139 \\u3141 \\u3142 \\u3145 \\u3147 \\u3148 \\u314a \\u314b \\u314c \\u314d \\u314e\\r\\n \\u2193\\r\\n4) \\uc2dd\\ud488\\uad00\\uc758 \\uc704\\uce58\\ub97c \\ud655\\uc778\\ud558\\uc5ec \\uc7a5\\uc18c\\ub97c \\uc774\\ub3d9\\ud55c\\ub2e4.\\r\\n\\r\\n\\r\\n\\r\\n\\uc2dd\\ud488\\uad00\\uc758 \\uc704\\uce58\\uc5d0 \\ub300\\ud55c iMAP\\uc758 \\ud55c\\uae00 \\ud78c\\ud2b8 \\uc815\\ubcf4\\uac00 \\ub2e4\\uc74c\\uacfc \\uac19\\uc744 \\ub54c, \\uacfc\\uc815 3)\\uc744 \\uba87 \\ubc88 \\ubc18\\ubcf5\\ud574\\uc57c \\ud560\\uae4c? \\r\\n\\r\\n[ \\uc2dd\\ud488\\uad00: \\u3145 + \\u3141]", "polymorphic_ctype": 15, "resourcetype": "TextBlock"}]}, "tags": [{"id": 4, "name": "\\uad6c\\ud604\\ub2a5\\ub825"}], "name": "among_elementary3"}, {"id": 4, "response": {"id": 4, "choices": [{"id": 16, "text": "bottom up"}, {"id": 17, "text": "top up up"}, {"id": 18, "text": "top down"}, {"id": 19, "text": "top left"}, {"id": 20, "text": "right up"}], "answer": {"id": 20, "text": "right up"}, "polymorphic_ctype": 16, "resourcetype": "UniqueAnswerResponse"}, "context_block": {"id": 4, "blocks": [{"id": 4, "text": "\\uac00\\uc744\\uc774 \\uc5b4\\uba38\\ub2c8\\uac00 \\uc2dd\\ud488\\uad00 \\uc911 \\uacfc\\uc77c\\ucf54\\ub108\\uc5d0 \\uc788\\ub2e4\\uace0 \\ud560 \\ub54c, \\uacfc\\uc77c\\ucf54\\ub108\\ub85c \\uac00\\uae30 \\uc704\\ud574\\uc11c\\ub294 \\ub2e4\\uc74c\\uc758 \\uc554\\ud638\\ub97c \\ud574\\ub3c5\\ud574\\uc57c \\ud55c\\ub2e4. \\r\\n\\r\\n[ 18090708202116 ]\\r\\n\\r\\n*\\uc601\\ubb38 \\uc21c\\uc11c: A B C D E F G H I J K L M N O P Q R S T U V W X Y Z\\r\\n\\r\\n\\uc554\\ud638\\uc758 \\uc22b\\uc790\\ub294 \\ub450 \\uc790\\ub9ac\\uc529 \\ub04a\\uc5b4\\uc11c \\ud574\\ub3c5\\ud558\\uace0 \\uac01\\uac01 1\\uac1c\\uc758 \\uc601\\ubb38\\uc790\\uc5d0 \\ub300\\uc751\\ub41c\\ub2e4. 
\\uc554\\ud638\\uac00 \\ucd1d 7\\uac1c\\uc758 \\uc601\\ubb38\\uc790\\ub85c \\uc774\\ub8e8\\uc5b4\\uc84c\\ub2e4\\uace0 \\ud560 \\ub54c, \\ub2e4\\uc74c \\uc911 \\uacfc\\uc77c\\ucf54\\ub108\\uc758 \\uc704\\uce58\\ub294 \\uc5b4\\ub514\\uc77c\\uae4c?", "polymorphic_ctype": 15, "resourcetype": "TextBlock"}]}, "tags": [{"id": 3, "name": "\\uc77c\\ubc18\\ud654 \\ub2a5\\ub825"}], "name": "among_elementary4"}], "name": "elementary"}}, {"id": 54, "answers": [{"id": 5, "text": "\\u314c"}, {"id": 9, "text": "\\u3131, \\u3134, \\u3139"}, {"id": 14, "text": "12"}, {"id": 20, "text": "right up"}], "exam": {"id": 1, "questions": [{"id": 1, "response": {"id": 1, "choices": [{"id": 2, "text": "\\u3148"}, {"id": 3, "text": "\\u314a"}, {"id": 4, "text": "\\u314b"}, {"id": 5, "text": "\\u314c"}, {"id": 6, "text": "\\u314d"}], "answer": {"id": 3, "text": "\\u314a"}, "polymorphic_ctype": 16, "resourcetype": "UniqueAnswerResponse"}, "context_block": {"id": 1, "blocks": [{"id": 1, "text": "\\uac00\\uc744\\uc774\\ub294 \\ubc31\\ud654\\uc810 \\uc548\\uc5d0 \\uc788\\ub294 \\uc601\\ud654\\uad00\\uc5d0\\uc11c \\uc601\\ud654\\ub97c \\ubcf4\\ub824\\uace0 \\ud569\\ub2c8\\ub2e4. \\ub2e4\\uc74c\\uacfc \\uac19\\uc740 \\uc21c\\uc11c\\ub85c \\uc7a5\\uc18c\\ub97c \\ucc3e\\uc744 \\uc218 \\uc788\\ub2e4.\\r\\n\\r\\n\\ubc31\\ud654\\uc810 \\ub0b4\\uc5d0 \\uc788\\ub294 \\uc601\\ud654\\uad00\\uc744 \\ucc3e\\uae30 \\uc704\\ud55c \\ubc29\\ubc95\\uc740 \\ub2e4\\uc74c\\uacfc \\uac19\\ub2e4.\\r\\n\\ubc31\\ud654\\uc810 1\\uce35\\uc5d0 \\uc788\\ub294 iMAP\\uc73c\\ub85c \\uac04\\ub2e4. \\uadf8 \\ub2e4\\uc74c \\uac00\\uace0\\uc790 \\ud558\\ub294 \\uc7a5\\uc18c\\ub97c \\uac80\\uc0c9\\ud55c\\ub2e4. \\r\\niMAP\\uc5d0\\uc11c \\uc7a5\\uc18c\\uc5d0 \\ub300\\ud55c \\uc704\\uce58\\uc815\\ubcf4\\ub97c 2\\uac00\\uc9c0 \\ud78c\\ud2b8\\ub85c \\uc81c\\uacf5\\ub41c\\ub2e4. \\r\\n\\r\\n\\uccab \\ubc88\\uc9f8 \\ud78c\\ud2b8\\ub294 \\uc7a5\\uc18c\\uac00 \\uba87 \\uce35\\uc5d0 \\uc788\\ub294\\uc9c0\\ub97c \\ud55c\\uae00\\ub85c \\uc54c\\ub824\\uc8fc\\uace0,\\r\\n\\ub450 \\ubc88\\uc9f8 \\ud78c\\ud2b8\\ub294 \\uc5b4\\ub290 \\uc704\\uce58\\uc5d0 \\uc788\\ub294\\uc9c0\\ub97c \\uc22b\\uc790\\uc815\\ubcf4\\ub85c \\uc54c\\ub824\\uc900\\ub2e4.\\r\\n\\r\\n\\uc608\\ub97c \\ub4e4\\uc5b4, \\u3131\\uc774\\uba74 1\\uce35\\uc774\\uace0 \\u3141\\uc774\\uba74 5\\uce35\\uc744 \\ub098\\ud0c0\\ub0b8\\ub2e4.\\r\\n\\ud55c\\uae00\\uacfc \\ub3d9\\uc77c\\ud558\\uac8c A\\uc774\\uba74 1\\uce35\\uc774\\uace0 E\\uc774\\uba74 5\\uce35\\uc744 \\ub098\\ud0c0\\ub0b8\\ub2e4.\\r\\n\\r\\n*\\ud55c\\uae00 \\uc21c\\uc11c: \\u3131 \\u3134 \\u3137 \\u3139 \\u3141 \\u3142 \\u3145 \\u3147 \\u3148 \\u314a \\u314b \\u314c \\u314d \\u314e\\r\\n*\\uc601\\ubb38 \\uc21c\\uc11c: A B C D E F G H I J K L M N O P Q R S T U V W X Y Z\\r\\n\\r\\n\\uc601\\ud654\\uad00\\uc5d0 \\ub300\\ud55c iMAP\\uc758 \\uc704\\uce58 \\uc815\\ubcf4\\uac00 \\ub2e4\\uc74c\\uacfc \\uac19\\uc744 \\ub54c, \\uc601\\ud654\\uad00\\uc740 \\uba87 \\uce35\\uc5d0 \\uc788\\uc2b5\\ub2c8\\uae4c? 
\\r\\n\\r\\n\\r\\n[ \\uc601\\ud654\\uad00 : \\u3137 + \\u3147 ]", "polymorphic_ctype": 15, "resourcetype": "TextBlock"}]}, "tags": [{"id": 1, "name": "\\ubd84\\uc11d\\ub2a5\\ub825"}], "name": "among_elementary1"}, {"id": 2, "response": {"id": 2, "choices": [{"id": 1, "text": "\\u3131, \\u3134"}, {"id": 7, "text": "\\u3131, \\u3134, \\u3137, \\u3139, \\u3141"}, {"id": 8, "text": "\\u3131, \\u3134, \\u3137"}, {"id": 9, "text": "\\u3131, \\u3134, \\u3139"}, {"id": 10, "text": "\\u3131, \\u3134, \\u3141"}], "answer": {"id": 9, "text": "\\u3131, \\u3134, \\u3139"}, "polymorphic_ctype": 16, "resourcetype": "UniqueAnswerResponse"}, "context_block": {"id": 2, "blocks": [{"id": 2, "text": "\\uac00\\uc744\\uc774\\ub294 \\uc601\\ud654\\ub97c \\ub2e4 \\ubcf8 \\ud6c4 \\uc5b4\\uba38\\ub2c8\\uc640 \\uc2dd\\ud488\\uad00\\uc5d0\\uc11c \\ub9cc\\ub098\\uae30\\ub85c \\ud588\\ub2e4. \\ub2e4\\uc74c\\uc740 \\uac00\\uc744\\uc774\\uac00 \\uc2dd\\ud488\\uad00\\uc5d0 \\uac00\\uae30 \\uc704\\ud574\\uc11c \\uace0\\ub824\\ud574\\uc57c \\ud560 \\uc804\\uc81c\\uc870\\uac74\\uc5d0 \\ub300\\ud574 \\ub098\\uc5f4\\ud55c \\uac83\\uc774\\ub2e4. \\ubc18\\ub4dc\\uc2dc \\ud544\\uc694\\ud55c \\uc870\\uac74\\uc73c\\ub85c\\ub9cc \\ubb36\\uc778 \\uac83\\uc740? \\r\\n\\r\\n\\r\\n\\r\\n\\u3131. \\uc5b4\\ub290 \\uc7a5\\uc18c\\ub97c \\uac00\\uc57c\\ud558\\ub294\\uc9c0 \\uc54c\\uace0\\uc788\\ub294\\uac00?\\r\\n\\u3134. iMAP\\uc774 \\uc5b4\\ub514\\uc788\\ub294\\uc9c0 \\uc54c\\uace0\\uc788\\ub294\\uac00?\\r\\n\\u3137. \\ubc31\\ud654\\uc810 \\uc6b4\\uc601\\uc2dc\\uac04\\uc744 \\uc54c\\uace0\\uc788\\ub294\\uac00?\\r\\n\\u3139. \\uc704\\uce58 \\uc815\\ubcf4\\uc5d0 \\ub300\\ud55c \\ud78c\\ud2b8\\ub97c \\ud574\\ub3c5\\ud560 \\uc218 \\uc788\\ub294\\uac00?\\r\\n\\u3141. \\uc5d8\\ub9ac\\ubca0\\uc774\\ud130 \\uc791\\ub3d9\\uc740 \\uc6d0\\ud65c\\ud55c\\uac00?", "polymorphic_ctype": 15, "resourcetype": "TextBlock"}]}, "tags": [{"id": 2, "name": "\\ubaa8\\ub378\\ub9c1 \\ub2a5\\ub825"}], "name": "among_elementary2"}, {"id": 3, "response": {"id": 3, "choices": [{"id": 11, "text": "2"}, {"id": 12, "text": "5"}, {"id": 13, "text": "7"}, {"id": 14, "text": "12"}, {"id": 15, "text": "13"}], "answer": {"id": 14, "text": "12"}, "polymorphic_ctype": 16, "resourcetype": "UniqueAnswerResponse"}, "context_block": {"id": 3, "blocks": [{"id": 3, "text": "\\ub2e4\\uc74c\\uacfc \\uac19\\uc740 \\uc77c\\ub828\\uc758 \\uc21c\\uc11c\\ub85c \\uc2dd\\ud488\\uad00\\uc744 \\ucc3e\\uc73c\\ub824\\uace0 \\ud55c\\ub2e4.\\r\\n\\r\\n\\r\\n1) iMAP\\uc73c\\ub85c \\uac00\\uc11c \\uc7a5\\uc18c\\ub97c \\uac80\\uc0c9\\ud55c\\ub2e4.\\r\\n \\u2193\\r\\n2) \\uc704\\uce58\\uc815\\ubcf4\\ub97c \\ud655\\uc778\\ud55c\\ub2e4.\\r\\n \\u2193\\r\\n3) \\ud55c\\uae00 \\ud78c\\ud2b8\\uc5d0 \\ub300\\ud55c \\uc815\\ubcf4\\ub97c \\ud574\\ub3c5\\ud558\\uc5ec \\uc2dd\\ud488\\uad00\\uc774 \\uba87 \\uce35\\uc778\\uc9c0 \\ud655\\uc778\\ud55c\\ub2e4.\\r\\n(\\ub2e8, \\uac01 \\uae00\\uc790\\ub294 \\u3131\\ubd80\\ud130 \\uc2dc\\uc791\\ud558\\uc5ec \\ubc18\\ubcf5\\ud69f\\uc218\\ub97c \\uacc4\\uc0b0\\ud55c\\ub2e4. 
\\uc608\\ub97c \\ub4e4\\uc5b4, \\u3137\\uc758 \\uacbd\\uc6b0 \\ubc18\\ubcf5\\ud69f\\uc218\\ub294 3\\ubc88\\uc774\\ub2e4.)\\r\\n*\\ud55c\\uae00 \\uc21c\\uc11c: \\u3131 \\u3134 \\u3137 \\u3139 \\u3141 \\u3142 \\u3145 \\u3147 \\u3148 \\u314a \\u314b \\u314c \\u314d \\u314e\\r\\n \\u2193\\r\\n4) \\uc2dd\\ud488\\uad00\\uc758 \\uc704\\uce58\\ub97c \\ud655\\uc778\\ud558\\uc5ec \\uc7a5\\uc18c\\ub97c \\uc774\\ub3d9\\ud55c\\ub2e4.\\r\\n\\r\\n\\r\\n\\r\\n\\uc2dd\\ud488\\uad00\\uc758 \\uc704\\uce58\\uc5d0 \\ub300\\ud55c iMAP\\uc758 \\ud55c\\uae00 \\ud78c\\ud2b8 \\uc815\\ubcf4\\uac00 \\ub2e4\\uc74c\\uacfc \\uac19\\uc744 \\ub54c, \\uacfc\\uc815 3)\\uc744 \\uba87 \\ubc88 \\ubc18\\ubcf5\\ud574\\uc57c \\ud560\\uae4c? \\r\\n\\r\\n[ \\uc2dd\\ud488\\uad00: \\u3145 + \\u3141]", "polymorphic_ctype": 15, "resourcetype": "TextBlock"}]}, "tags": [{"id": 4, "name": "\\uad6c\\ud604\\ub2a5\\ub825"}], "name": "among_elementary3"}, {"id": 4, "response": {"id": 4, "choices": [{"id": 16, "text": "bottom up"}, {"id": 17, "text": "top up up"}, {"id": 18, "text": "top down"}, {"id": 19, "text": "top left"}, {"id": 20, "text": "right up"}], "answer": {"id": 20, "text": "right up"}, "polymorphic_ctype": 16, "resourcetype": "UniqueAnswerResponse"}, "context_block": {"id": 4, "blocks": [{"id": 4, "text": "\\uac00\\uc744\\uc774 \\uc5b4\\uba38\\ub2c8\\uac00 \\uc2dd\\ud488\\uad00 \\uc911 \\uacfc\\uc77c\\ucf54\\ub108\\uc5d0 \\uc788\\ub2e4\\uace0 \\ud560 \\ub54c, \\uacfc\\uc77c\\ucf54\\ub108\\ub85c \\uac00\\uae30 \\uc704\\ud574\\uc11c\\ub294 \\ub2e4\\uc74c\\uc758 \\uc554\\ud638\\ub97c \\ud574\\ub3c5\\ud574\\uc57c \\ud55c\\ub2e4. \\r\\n\\r\\n[ 18090708202116 ]\\r\\n\\r\\n*\\uc601\\ubb38 \\uc21c\\uc11c: A B C D E F G H I J K L M N O P Q R S T U V W X Y Z\\r\\n\\r\\n\\uc554\\ud638\\uc758 \\uc22b\\uc790\\ub294 \\ub450 \\uc790\\ub9ac\\uc529 \\ub04a\\uc5b4\\uc11c \\ud574\\ub3c5\\ud558\\uace0 \\uac01\\uac01 1\\uac1c\\uc758 \\uc601\\ubb38\\uc790\\uc5d0 \\ub300\\uc751\\ub41c\\ub2e4. \\uc554\\ud638\\uac00 \\ucd1d 7\\uac1c\\uc758 \\uc601\\ubb38\\uc790\\ub85c \\uc774\\ub8e8\\uc5b4\\uc84c\\ub2e4\\uace0 \\ud560 \\ub54c, \\ub2e4\\uc74c \\uc911 \\uacfc\\uc77c\\ucf54\\ub108\\uc758 \\uc704\\uce58\\ub294 \\uc5b4\\ub514\\uc77c\\uae4c?", "polymorphic_ctype": 15, "resourcetype": "TextBlock"}]}, "tags": [{"id": 3, "name": "\\uc77c\\ubc18\\ud654 \\ub2a5\\ub825"}], "name": "among_elementary4"}], "name": "elementary"}}], "name": "\\ud64d\\uc2b9\\uc758"}')
print(c_json) | AMONG-py | /AMONG_py-0.0.3.4-py3-none-any.whl/AMONGpy/visualiztion.py | visualiztion.py |
import csv, json, itertools
def get_recommended_project(exam_logs_json) :
    '''
    Recommend a project for a student based on their exam logs.

    exam_logs_json is a JSON string with the structure (abbreviated):
    {
        "name" : "student name",
        "exam_logs" : [
            {
                "answers" : [{"id" : 3, "text" : "..."}, ...],
                "exam" : {
                    "name" : "exam name",
                    "questions" : [
                        {
                            "tags" : [{"name" : "tag1"}, {"name" : "tag2"}],
                            "response" : {"answer" : {"id" : 3, "text" : "..."}}
                        },
                        ...
                    ]
                }
            },
            ...
        ]
    }

    :param exam_logs_json: JSON string of exam logs
    :return: JSON string of the form
    {
        "name" : "project name",
        "tool" : "tool used (e.g. Arduino, App Inventor, ...)",
        "difficulty" : 0~20
    }
    '''
    # projectKeyword.csv columns (Korean headers): 분류 = tool/category,
    # 프로젝트명 = project name, 난이도 = difficulty, 키워드 = keyword/tag
    f = open('AMONGpy/projectKeyword.csv', 'r', encoding='utf-8')
    rdr = csv.reader(f)
    project_list = []
    for idx, line in enumerate(rdr):
        if idx >= 1:  # skip the header row
            project_list.append({"분류":line[0], "프로젝트명":line[1], "난이도":line[2], "키워드":line[3]})
    f.close()
print(project_list)
exam_logs_json = json.loads(exam_logs_json)
    # per-exam score scaled to 0-20: fraction of questions answered correctly
    scores = [len([a for a, q in zip(e['answers'], e['exam']['questions']) if a['id'] == q['response']['answer']['id']]) / len(e['exam']['questions']) * 20.0 for e in exam_logs_json['exam_logs']]
    print(scores)
all_tag = [q['tags'] for e in exam_logs_json['exam_logs'] for q in e['exam']['questions']]
all_tag = list(itertools.chain(*all_tag))
all_tag = [x['name'] for x in all_tag]
all_tag = list(set(all_tag))
all_tag = sorted(all_tag)
accs = [[q for a, q in zip(e['answers'], e['exam']['questions']) if a['id'] == q['response']['answer']['id']]
for e in exam_logs_json['exam_logs']]
for acc in accs :
for p in acc :
tag_list = []
for t in p['tags'] :
tag_list.append(t['name'])
p['tags'] = tag_list
    # per-tag correctness for each exam, with partial credit split across a
    # question's tags
    tag_cor = []
    for t in all_tag:
        acc_scores = [sum([1.0 / len(p['tags']) for p in acc if t in p['tags']]) for i, acc in enumerate(accs)]
        tag_cor.append((t, acc_scores))
    # sort tags from weakest to strongest performance
    tag_cor = sorted(tag_cor, key=lambda x : x[1])
    print(tag_cor)
    # sort projects by how close their difficulty is to the most recent exam score
    project_list = sorted(project_list, key=lambda x : abs(int(x["난이도"]) - scores[-1]))
    print(project_list)
    # combine the two rankings: project-difficulty rank plus tag-weakness rank
    # (weighted 1.5), keeping only projects whose keyword matches an exam tag
    distlist = []
    for i, p in enumerate(project_list) :
        for j, t in enumerate(tag_cor) :
            if p["키워드"] == t[0] :
                distlist.append((p, i + j * 1.5))
    distlist = sorted(distlist, key=lambda x : x[1])
    print(distlist)
    if len(distlist) == 0 :
        print('No project keyword matched any tag in the given exams')
        return None
most_recommended_project = distlist[0][0]
return json.dumps({"name":most_recommended_project["프로젝트명"], "tool":most_recommended_project["분류"], "difficulty":most_recommended_project["난이도"]})
if __name__ == "__main__" :
p = get_recommended_project('{'
'"id": 7,'
'"exam_logs": ['
'{'
'"id": 5,'
'"answers": ['
'{'
'"id": 3,'
'"text": "우진이가 잘했다"'
'},'
'{'
'"id": 6,'
'"text": "승의가 잘못했다"'
'}'
'],'
'"exam": {'
'"id": 4,'
'"questions": ['
'{'
'"id": 5,'
'"response": {'
'"id": 5,'
'"choices": ['
'{'
'"id": 2,'
'"text": "상준이가 잘했다"'
'},'
'{'
'"id": 3,'
'"text": "우진이가 잘했다"'
'},'
'{'
'"id": 4,'
'"text": "고러엄"'
'},'
'{'
'"id": 5,'
'"text": "안녕"'
'}'
'],'
'"answer": {'
'"id": 3,'
'"text": "우진이가 잘했다"'
'},'
'"polymorphic_ctype": 16,'
'"resourcetype": "UniqueAnswerResponse"'
'},'
'"context_block": {'
'"id": 1,'
'"blocks": []'
'},'
'"tags": ['
'{'
'"id": 2,'
'"name": "아두이노"'
'},'
'{'
'"id": 3,'
'"name": "자료수집"'
'}'
'],'
'"name": "1번문제"'
'},'
'{'
'"id": 6,'
'"response": {'
'"id": 6,'
'"choices": ['
'{'
'"id": 6,'
'"text": "승의가 잘못했다"'
'},'
'{'
'"id": 7,'
'"text": "승의가 잘했다"'
'}'
'],'
'"answer": {'
'"id": 7,'
'"text": "승의가 잘했다"'
'},'
'"polymorphic_ctype": 16,'
'"resourcetype": "UniqueAnswerResponse"'
'},'
'"context_block": {'
'"id": 2,'
'"blocks": ['
'{'
'"id": 7,'
'"text": "과연 상준이가 잘했을까? 우진이가 잘했을까?",'
'"polymorphic_ctype": 15,'
'"resourcetype": "TextBlock"'
'},'
'{'
'"id": 8,'
'"text": "과연 누가 잘했을까? 보기에서 잘 골라보자",'
'"polymorphic_ctype": 15,'
'"resourcetype": "TextBlock"'
'}'
']'
'},'
'"tags": ['
'{'
'"id": 2,'
'"name": "아두이노"'
'}'
'],'
'"name": "두번째 문제"'
'}'
'],'
'"name": "첫번째 시험"'
'}'
'}'
'],'
'"name": "홍승의"'
'}')
print(p) | AMONG-py | /AMONG_py-0.0.3.4-py3-none-any.whl/AMONGpy/analysis.py | analysis.py |
AMP: Automatic Mathematical Parser
==================================
A Python 2.7 library that parses and evaluates numerical mathematical expressions given as strings, with support for complex numbers and mathematical constants, as well as scientific notation and a wide range of mathematical functions.
Documentation
-------------
The documentation available as of the date of this release is included in the docs/ directory.
Installation
------------
Installation of this package uses pip, included by default in Python 2.7.9 and later. If your computer doesn’t have pip, download `this file
<http://bootstrap.pypa.io/get-pip.py>`_. Then open up your terminal and enter the command shown below::
python get-pip.py
Installation of the AMP package can be done by opening your computer's terminal and entering the following::
pip install AMP
License
-------
See the file called LICENSE.txt.
Links
-----
The most up-to-date version of this software can be found at:
* https://github.com/ioguntol/Automatic-Mathematical-Parser
* https://pypi.python.org/pypi/AMP/
Contact
-------
The developer, Ini Oguntola, can be contacted at [email protected].
| AMP | /AMP-1.1.4.tar.gz/AMP-1.1.4/README.txt | README.txt |
# AMPAL
A simple, intuitive and Pythonic framework for representing biomolecular structure.
[](https://circleci.com/gh/isambard-uob/ampal/tree/master)
[]()
[](https://github.com/isambard-uob/ampal/blob/master/LICENSE)
## Installation
You can install AMPAL from pip:
`pip install ampal`
Or from source by downloading/cloning this repository, navigating to the folder
and typing:
`pip install .`
AMPAL uses Cython, so if you're installing from source make sure you have it
installed.
## Super Quick Start
Load a PDB file into AMPAL:
```Python
my_structure = ampal.load_pdb('3qy1.pdb')
print(my_structure)
# OUT: <Assembly (3qy1) containing 2 Polypeptides, 449 Ligands>
```
Select regions of the structure in an intuitive manner:
```Python
my_atom = my_structure['A']['56']['CA']
print(my_structure['A']['56']['CA'])
# OUT: <Carbon Atom (CA). Coordinates: (6.102, -4.287, -29.607)>
```
Then climb all the way back up the hierarchy:
```Python
print(my_atom.parent)
# OUT: <Residue containing 9 Atoms. Residue code: GLU>
print(my_atom.parent.parent)
# OUT: <Polypeptide containing 215 Residues. Sequence: DIDTLISNNALW...>
print(my_atom.parent.parent.parent)
# OUT: <Assembly (3qy1) containing 2 Polypeptides, 449 Ligands>
```
This is just a quick introduction; AMPAL contains tonnes of tools for making
complex selections and performing analysis. Take a look at the
[docs](https://isambard-uob.github.io/ampal/) to find out more.
## Release Notes
### v1.4.0
* **Adds `get_ss_regions` to `ampal.dssp`.** This function can be used to
  extract all regions of a protein in a particular secondary structure (see
  the sketch below).
* **Fixes bug with DSSP `ss_region` tagging.** End residues used to be missed.
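
A minimal sketch of pulling out all the helical regions of a structure. The
exact signature of `get_ss_regions` is an assumption based on the description
above; check the module documentation for the definitive API.

```Python
import ampal
from ampal.dssp import get_ss_regions

my_structure = ampal.load_pdb('3qy1.pdb')
# 'H' is the DSSP code for an alpha-helix; DSSP must be installed and the
# `mkdssp` command available on your path
helices = get_ss_regions(my_structure, ['H'])
```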
### v1.3.0
* **Adds an interface for NACCESS.** Functions for using NACCESS to calculate
solvent accessibility.
### v1.2.0
* **Adds an interface for DSSP.** If you have DSSP on your computer and have the
  `mkdssp` command available on your path, you can use the `ampal.tag_dssp_data`
  function to add secondary structure information to the tags dictionary of the
  residues in your structure (see the sketch after this list).
* **Adds the `ampal.align` module.** Contains a simple class for aligning two
`Polypeptides` using MMC. The simplest interface is the `align_backbones`
function.
* This is currently super inefficient and will be reimplemented.
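
A minimal sketch of the DSSP tagging described above, assuming DSSP is
installed and `mkdssp` is available on your path (the exact tag key used to
store the data is not specified here; inspect a residue's `tags` dictionary
after running this):

```Python
import ampal

my_structure = ampal.load_pdb('3qy1.pdb')
ampal.tag_dssp_data(my_structure)
print(my_structure['A']['56'].tags)  # now includes DSSP-derived information
```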
### v1.1.0
* **Adds the centroid property to residues.**
| AMPAL | /AMPAL-1.4.0.tar.gz/AMPAL-1.4.0/README.md | README.md |
from collections import Counter
import itertools
from ampal.base_ampal import BaseAmpal, Polymer, find_atoms_within_distance
from ampal.ligands import LigandGroup, Ligand
from ampal.analyse_protein import (
sequence_molecular_weight, sequence_molar_extinction_280,
sequence_isoelectric_point)
class AmpalContainer(object):
"""Custom list type class that holds multiple model states.
Notes
-----
In this case, a state is defined as a set of coordinates that
represents a protein model and an associated score or set of scores.
Parameters
----------
ampal_objects : [AMPAL], optional
A list of AMPAL objects with which to initialise the
AMPAL container. This can be an `Assembly`, `Polymer`
or `Monomer`.
id : str, optional
Identifier for the AMPAL container.
Attributes
----------
id : str
Identifier for the AMPAL container.
"""
def __init__(self, ampal_objects=None, id=None):
self.id = 'AMPAL Container' if not id else id
if ampal_objects:
self._ampal_objects = ampal_objects
else:
self._ampal_objects = []
def __add__(self, other):
"""Merges two `AmpalContainers`.
Notes
-----
Generates new `AmpalContainer`.
"""
if isinstance(other, AmpalContainer):
merged_ac = self._ampal_objects[:] + other._ampal_objects[:]
else:
raise TypeError(
'Only AmpalContainer objects may be merged with an '
'AmpalContainer using unary operator "+".')
return AmpalContainer(ampal_objects=merged_ac)
def __repr__(self):
return "<AmpalContainer ({}) containing {} AMPAL Objects>".format(
self.id, len(self._ampal_objects))
def __len__(self):
return len(self._ampal_objects)
def __getitem__(self, item):
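        # String keys are matched against the final '_'-separated token of
        # each object's id, integer keys index a single state, and slices
        # return a new AmpalContainer.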
if isinstance(item, str):
id_dict = {p.id.split('_')[-1]: p for p in self._ampal_objects}
return id_dict[item]
elif isinstance(item, int):
return self._ampal_objects[item]
else:
return AmpalContainer(self._ampal_objects[item])
def append(self, item):
"""Adds an AMPAL object to the `AmpalContainer`."""
self._ampal_objects.append(item)
return
def extend(self, ampal_container):
"""Extends an `AmpalContainer` with another `AmpalContainer`."""
if isinstance(ampal_container, AmpalContainer):
self._ampal_objects.extend(ampal_container)
else:
raise TypeError(
'Only AmpalContainer objects may be merged with '
'an AmpalContainer.')
return
@property
def pdb(self):
"""Compiles the PDB strings for each state into a single file."""
header_title = '{:<80}\n'.format('HEADER {}'.format(self.id))
data_type = '{:<80}\n'.format('EXPDTA ISAMBARD Model')
pdb_strs = []
for ampal in self:
if isinstance(ampal, Assembly):
pdb_str = ampal.make_pdb(header=False, footer=False)
else:
pdb_str = ampal.make_pdb()
pdb_strs.append(pdb_str)
merged_strs = 'ENDMDL\n'.join(pdb_strs) + 'ENDMDL\n'
merged_pdb = ''.join([header_title, data_type, merged_strs])
return merged_pdb
def sort_by_tag(self, tag):
"""Sorts the `AmpalContainer` by a tag on the component objects.
Parameters
----------
tag : str
Key of tag used for sorting.
"""
return AmpalContainer(sorted(self, key=lambda x: x.tags[tag]))
class Assembly(BaseAmpal):
"""A container that holds `Polymer` type objects.
Notes
-----
Has a simple hierarchy: `Assembly` contains one or more `Polymer`,
which in turn contains one or more `Monomer`.
Parameters
----------
molecules : Polymer or [Polymer], optional
`Polymer` or list containing `Polymer` objects to be assembled.
assembly_id : str, optional
An ID that the user can use to identify the `Assembly`. This
is used when generating a pdb file using `Assembly().pdb`.
Raises
------
TypeError
`Assembly` objects can only be initialised empty, using a `Polymer`
or a list of `Polymers`.
"""
def __init__(self, molecules=None, assembly_id=''):
if molecules:
if isinstance(molecules, Polymer):
self._molecules = [molecules]
elif isinstance(molecules, list) and isinstance(molecules[0], Polymer):
self._molecules = list(molecules)
else:
raise TypeError(
'Assembly objects can only be initialised empty, using '
'a Polymer or a list of Polymers.')
else:
self._molecules = []
self.id = str(assembly_id)
self.tags = {}
def __add__(self, other):
"""Merges together two `Assemblies`.
Raises
------
TypeError
Raised if other is any type other than `Assembly`.
"""
if isinstance(other, Assembly):
merged_assembly = self._molecules[:] + other._molecules[:]
else:
            raise TypeError(
                'Only Assembly objects may be merged with an Assembly '
                'using the "+" operator.')
return Assembly(molecules=merged_assembly, assembly_id=self.id)
def __len__(self):
return len(self._molecules)
def __getitem__(self, item):
if isinstance(item, str):
id_dict = {str(p.id): p for p in self._molecules}
return id_dict[item]
elif isinstance(item, int):
return self._molecules[item]
else:
return Assembly(self._molecules[item], assembly_id=self.id)
def __repr__(self):
repr_strs = []
mol_types = Counter([x.molecule_type for x in self._molecules])
        if 'protein' in mol_types:
            repr_strs.append('{} {}'.format(
                mol_types['protein'],
                'Polypeptide' if mol_types['protein'] == 1 else 'Polypeptides'))
        if 'nucleic_acid' in mol_types:
            repr_strs.append('{} {}'.format(
                mol_types['nucleic_acid'],
                'Polynucleotide' if mol_types['nucleic_acid'] == 1
                else 'Polynucleotides'))
        ligand_count = 0
        if 'ligands' in mol_types:
            repr_strs.append('{} {}'.format(
                mol_types['ligands'],
                'Ligand Group' if mol_types['ligands'] == 1
                else 'Ligand Groups'))
        for mol in self._molecules:
            if mol.molecule_type == 'ligands':
                ligand_count += len(mol)
            else:
                ligand_count += 0 if not mol.ligands else len(mol.ligands)
        if ligand_count:
            repr_strs.append('{} {}'.format(
                ligand_count, 'Ligand' if ligand_count == 1 else 'Ligands'))
        if 'pseudo_group' in mol_types:
            repr_strs.append('{} {}'.format(
                mol_types['pseudo_group'],
                'Pseudo Group' if mol_types['pseudo_group'] == 1
                else 'Pseudo Groups'))
id_str = '' if not self.id else '({}) '.format(self.id)
return '<Assembly {}containing {}>'.format(id_str, ', '.join(repr_strs))
def append(self, item):
"""Adds a `Polymer` to the `Assembly`.
Raises
------
TypeError
Raised if other is any type other than `Polymer`.
"""
if isinstance(item, Polymer):
self._molecules.append(item)
else:
raise TypeError(
'Only Polymer objects can be appended to an Assembly.')
return
def extend(self, assembly):
"""Extends the `Assembly` with the contents of another `Assembly`.
Raises
------
TypeError
Raised if other is any type other than `Assembly`.
"""
if isinstance(assembly, Assembly):
self._molecules.extend(assembly)
else:
raise TypeError(
'Only Assembly objects may be merged with an Assembly.')
return
def get_monomers(self, ligands=True, pseudo_group=False):
"""Retrieves all the `Monomers` from the `Assembly` object.
Parameters
----------
ligands : bool, optional
            If `True`, will include ligand `Monomers`.
pseudo_group : bool, optional
If `True`, will include pseudo atoms.
"""
base_filters = dict(ligands=ligands, pseudo_group=pseudo_group)
restricted_mol_types = [x[0] for x in base_filters.items() if not x[1]]
in_groups = [x for x in self.filter_mol_types(restricted_mol_types)]
monomers = itertools.chain(
*(p.get_monomers(ligands=ligands) for p in in_groups))
return monomers
def get_ligands(self, solvent=True):
"""Retrieves all ligands from the `Assembly`.
Parameters
----------
solvent : bool, optional
If `True`, solvent molecules will be included.
"""
if solvent:
ligand_list = [x for x in self.get_monomers()
if isinstance(x, Ligand)]
else:
ligand_list = [x for x in self.get_monomers() if isinstance(
x, Ligand) and not x.is_solvent]
return LigandGroup(monomers=ligand_list)
def get_atoms(self, ligands=True, pseudo_group=False, inc_alt_states=False):
""" Flat list of all the `Atoms` in the `Assembly`.
Parameters
----------
ligands : bool, optional
Include ligand `Atoms`.
pseudo_group : bool, optional
Include pseudo_group `Atoms`.
inc_alt_states : bool, optional
Include alternate sidechain conformations.
Returns
-------
atoms : itertools.chain
All the `Atoms` as a iterator.
"""
atoms = itertools.chain(
*(list(m.get_atoms(inc_alt_states=inc_alt_states))
for m in self.get_monomers(ligands=ligands,
pseudo_group=pseudo_group)))
return atoms
def is_within(self, cutoff_dist, point, ligands=True):
"""Returns all atoms in AMPAL object within `cut-off` distance from the `point`."""
return find_atoms_within_distance(self.get_atoms(ligands=ligands), cutoff_dist, point)
def relabel_all(self):
"""Relabels all Polymers, Monomers and Atoms with default labeling."""
self.relabel_polymers()
self.relabel_monomers()
self.relabel_atoms()
return
def relabel_polymers(self, labels=None):
"""Relabels the component Polymers either in alphabetical order or
using a list of labels.
Parameters
----------
labels : list, optional
A list of new labels.
Raises
------
ValueError
Raised if the number of labels does not match the number of
component Polymer objects.
"""
if labels:
if len(self._molecules) == len(labels):
for polymer, label in zip(self._molecules, labels):
polymer.id = label
else:
raise ValueError('Number of polymers ({}) and number of labels ({}) must be equal.'.format(
len(self._molecules), len(labels)))
else:
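            # No labels supplied: label the polymers alphabetically
            # ('A', 'B', 'C', ...), since chr(65) is 'A'.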
for i, polymer in enumerate(self._molecules):
polymer.id = chr(i + 65)
return
def relabel_monomers(self):
"""Relabels all Monomers in the component Polymers in numerical order."""
for polymer in self._molecules:
polymer.relabel_monomers()
return
def relabel_atoms(self, start=1):
"""Relabels all Atoms in numerical order, offset by the start parameter.
Parameters
----------
start : int, optional
Defines an offset for the labelling.
"""
counter = start
for atom in self.get_atoms(ligands=True):
atom.id = counter
counter += 1
return
@property
def pdb(self):
"""Runs make_pdb in default mode."""
return self.make_pdb()
def make_pdb(self, ligands=True, alt_states=False, pseudo_group=False,
header=True, footer=True):
"""Generates a PDB string for the Assembly.
Parameters
----------
ligands : bool, optional
If `True`, will include ligands in the output.
alt_states : bool, optional
If `True`, will include alternate conformations in the output.
pseudo_group : bool, optional
If `True`, will include pseudo atoms in the output.
header : bool, optional
If `True` will write a header for output.
footer : bool, optional
If `True` will write a footer for output.
Returns
-------
pdb_str : str
String of the pdb for the Assembly. Generated by collating
Polymer().pdb calls for the component Polymers.
"""
base_filters = dict(ligands=ligands, pseudo_group=pseudo_group)
restricted_mol_types = [x[0] for x in base_filters.items() if not x[1]]
in_groups = [x for x in self.filter_mol_types(restricted_mol_types)]
        pdb_header = '{:<80}\n'.format(
            'HEADER ISAMBARD Model {}'.format(self.id)) if header else ''
pdb_body = ''.join([x.make_pdb(
alt_states=alt_states, inc_ligands=ligands) + '{:<80}\n'.format('TER') for x in in_groups])
pdb_footer = '{:<80}\n'.format('END') if footer else ''
pdb_str = ''.join([pdb_header, pdb_body, pdb_footer])
return pdb_str
# Protein specific methods
@property
def backbone(self):
"""Generates a new `Assembly` containing only the backbone atoms.
Notes
-----
Metadata is not currently preserved from the parent object.
Sequence data is retained, but only the main chain atoms are
retained.
Returns
-------
bb_assembly : ampal.Protein
`Assembly` containing only the backbone atoms of the original
`Assembly`.
"""
bb_molecules = [
p.backbone for p in self._molecules if hasattr(p, 'backbone')]
bb_assembly = Assembly(bb_molecules, assembly_id=self.id)
return bb_assembly
@property
def primitives(self):
"""Generates a new `Assembly` containing the primitives of each Polymer.
Notes
-----
Metadata is not currently preserved from the parent object.
Returns
-------
prim_assembly : ampal.Protein
`Assembly` containing only the primitives of the `Polymers`
in the original `Assembly`.
"""
prim_molecules = [
p.primitive for p in self._molecules if hasattr(p, 'primitive')]
prim_assembly = Assembly(molecules=prim_molecules, assembly_id=self.id)
return prim_assembly
@property
def sequences(self):
"""Returns the sequence of each `Polymer` in the `Assembly` as a list.
Returns
-------
sequences : [str]
List of sequences.
"""
seqs = [x.sequence for x in self._molecules if hasattr(x, 'sequence')]
return seqs
@property
def molecular_weight(self):
"""Returns the molecular weight of the `Assembly` in Daltons."""
return sequence_molecular_weight(''.join(self.sequences))
@property
def molar_extinction_280(self):
"""Returns the extinction co-efficient of the `Assembly` at 280 nm."""
return sequence_molar_extinction_280(''.join(self.sequences))
@property
def isoelectric_point(self):
"""Returns the isoelectric point of the `Assembly`."""
return sequence_isoelectric_point(''.join(self.sequences))
@property
def fasta(self):
"""Generates a FASTA string for the `Assembly`.
Notes
-----
Explanation of FASTA format: https://en.wikipedia.org/wiki/FASTA_format
        The recommendation that all lines be shorter than 80 characters
        is adhered to. The PDBID|CHAIN|SEQUENCE header format and the
        uppercase PDB ID are consistent with files downloaded from the
        PDB. Useful for feeding into cd-hit for sequence clustering.
Returns
-------
fasta_str : str
String of the fasta file for the `Assembly`.
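        Examples
        --------
        A sketch of the expected header format for an `Assembly` with
        id '1ek9' and a single chain 'A' (`my_assembly` is a placeholder):
        >>> my_assembly.fasta.splitlines()[0]  # doctest: +SKIP
        '>1EK9:A|PDBID|CHAIN|SEQUENCE'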
"""
fasta_str = ''
max_line_length = 79
for p in self._molecules:
if hasattr(p, 'sequence'):
fasta_str += '>{0}:{1}|PDBID|CHAIN|SEQUENCE\n'.format(
self.id.upper(), p.id)
seq = p.sequence
split_seq = [seq[i: i + max_line_length]
for i in range(0, len(seq), max_line_length)]
for seq_part in split_seq:
fasta_str += '{0}\n'.format(seq_part)
return fasta_str
def tag_torsion_angles(self, force=False):
"""Tags each `Monomer` in the `Assembly` with its torsion angles.
Parameters
----------
force : bool, optional
If `True`, the tag will be run even if `Monomers` are already
tagged.
"""
for polymer in self._molecules:
if polymer.molecule_type == 'protein':
polymer.tag_torsion_angles(force=force)
return
def tag_ca_geometry(self, force=False, reference_axis=None,
reference_axis_name='ref_axis'):
"""Tags each `Monomer` in the `Assembly` with its helical geometry.
Parameters
----------
force : bool, optional
If True the tag will be run even if `Monomers` are already tagged.
reference_axis : list(numpy.array or tuple or list), optional
Coordinates to feed to geometry functions that depend on
having a reference axis.
reference_axis_name : str, optional
Used to name the keys in tags at `Chain` and `Residue` level.
"""
for polymer in self._molecules:
if polymer.molecule_type == 'protein':
polymer.tag_ca_geometry(
force=force, reference_axis=reference_axis,
reference_axis_name=reference_axis_name)
return
def tag_atoms_unique_ids(self, force=False):
""" Tags each Atom in the Assembly with its unique_id.
Notes
-----
        The unique_id for each atom is a 2-tuple (a pair). `unique_id[0]`
is the unique_id for its parent `Monomer` (see `Monomer.unique_id`
for more information). `unique_id[1]` is the atom_type in the
`Assembly` as a string, e.g. 'CA', 'CD2'.
Parameters
----------
force : bool, optional
If True the tag will be run even if Atoms are already tagged.
If False, only runs if at least one Atom is not tagged.
"""
tagged = ['unique_id' in x.tags.keys() for x in self.get_atoms()]
if (not all(tagged)) or force:
for m in self.get_monomers():
for atom_type, atom in m.atoms.items():
atom.tags['unique_id'] = (m.unique_id, atom_type)
return
def filter_mol_types(self, mol_types):
return [x for x in self._molecules if x.molecule_type not in mol_types]
__author__ = "Christopher W. Wood, Gail J. Bartlett" | AMPAL | /AMPAL-1.4.0.tar.gz/AMPAL-1.4.0/src/ampal/assembly.py | assembly.py |
from ampal.base_ampal import Polymer, Monomer
class LigandGroup(Polymer):
"""A container for `Ligand` `Monomers`.
Parameters
----------
monomers : Monomer or [Monomer], optional
Monomer or list containing Monomer objects to form the Polymer().
polymer_id : str, optional
An ID that the user can use to identify the `Polymer`. This is
used when generating a pdb file using `Polymer().pdb`.
parent : ampal.Assembly, optional
Reference to `Assembly` containing the `Polymer`.
sl : int, optional
The default smoothing level used when calculating the
backbone primitive.
"""
def __init__(self, monomers=None, polymer_id=' ', parent=None, sl=2):
super().__init__(
monomers=monomers, polymer_id=polymer_id, molecule_type='ligands',
parent=parent, sl=sl)
def __repr__(self):
return '<Ligands chain containing {} {}>'.format(
len(self._monomers),
'Ligand' if len(self._monomers) == 1 else 'Ligands')
class Ligand(Monomer):
"""`Monomer` that represents a `Ligand`.
Notes
-----
All `Monomers` that do not have dedicated classes are
represented using the `Ligand` class.
Parameters
----------
mol_code : str
PDB molecule code that represents the monomer.
atoms : OrderedDict, optional
OrderedDict containing Atoms for the Monomer. OrderedDict
is used to maintain the order items were added to the
dictionary.
monomer_id : str, optional
String used to identify the residue.
insertion_code : str, optional
Insertion code of monomer, used if reading from pdb.
is_hetero : bool, optional
True if is a hetero atom in pdb. Helps with PDB formatting.
Attributes
----------
atoms : OrderedDict
OrderedDict containing Atoms for the Monomer. OrderedDict
is used to maintain the order items were added to the
dictionary.
mol_code : str
PDB molecule code that represents the `Ligand`.
insertion_code : str
Insertion code of `Ligand`, used if reading from pdb.
is_hetero : bool
True if is a hetero atom in pdb. Helps with PDB formatting.
    states : dict
Contains an `OrderedDicts` containing atom information for each
state available for the `Ligand`.
id : str
String used to identify the residue.
parent : Polymer or None
A reference to the `LigandGroup` containing this `Ligand`.
tags : dict
A dictionary containing information about this AMPAL object.
The tags dictionary is used by AMPAL to cache information
about this object, but is also intended to be used by users
to store any relevant information they have.
"""
def __init__(self, mol_code, atoms=None, monomer_id=' ', insertion_code=' ',
is_hetero=False, parent=None):
super(Ligand, self).__init__(
atoms, monomer_id, parent=parent)
self.mol_code = mol_code
self.insertion_code = insertion_code
self.is_hetero = is_hetero
def __repr__(self):
return '<Ligand containing {} {}. Ligand code: {}>'.format(
len(self.atoms), 'Atom' if len(self.atoms) == 1 else 'Atoms',
self.mol_code)
__author__ = "Christopher W. Wood, Kieran L. Hudson" | AMPAL | /AMPAL-1.4.0.tar.gz/AMPAL-1.4.0/src/ampal/ligands.py | ligands.py |
import copy
import math
import random
import sys
from typing import List, Optional
import numpy
from .geometry import unit_vector
from .protein import Polypeptide
def align_backbones(reference, mobile, stop_when=None, verbose=False):
mobile = copy.deepcopy(mobile)
initial_trans = reference.centre_of_mass - mobile.centre_of_mass
mobile.translate(initial_trans)
fitter = MMCAlign(_align_eval, [reference], mobile)
fitter.start_optimisation(500, 10, 1, temp=100, stop_when=stop_when,
verbose=verbose)
return fitter.best_energy
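# A usage sketch (assumes `ref` and `mob` are placeholder Polypeptides of
# equal length loaded elsewhere, e.g. via ampal.load_pdb):
#
#     best_rmsd = align_backbones(ref, mob, stop_when=1.0, verbose=True)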
def _align_eval(loop, reference):
return loop.rmsd(reference, backbone=True)
class MMCAlign:
"""A alignment protocol that uses Metropolis Monte Carlo.
Notes
-----
THIS IS CURRENTLY SUPER INEFFICIENT DUE TO THE DEEPCOPIES.
I plan to improve this by aligning arrays of atoms and only
recording the rotation and translation that led to that alignment.
Parameters
----------
eval_fn : Polypeptide -> float
A function to evaluate the quality of your fit.
eval_args : list
A list of static args to be used in the `eval_fn`, these will
be unpacked into the evaluation function _i.e._
        `eval_fn(polypeptide, *eval_args)`.
polypeptide : Polypeptide
An ampal polypeptide containing the model to be aligned.
"""
def __init__(self, eval_fn, eval_args: Optional[list],
polypeptide: Polypeptide) -> None:
self.eval_fn = eval_fn
if eval_args is None:
self.eval_args: List = []
else:
self.eval_args = eval_args
self.current_energy = None
self.best_energy = None
self.best_model = None
self.polypeptide = polypeptide
def start_optimisation(self, rounds: int, max_angle: float,
max_distance: float, temp: float=298.15,
stop_when=None, verbose=None):
"""Starts the loop fitting protocol.
Parameters
----------
rounds : int
The number of Monte Carlo moves to be evaluated.
max_angle : float
            The maximum rotation that can be applied per step.
        max_distance : float
            The maximum distance that can be moved per step.
temp : float, optional
Temperature used during fitting process.
stop_when : float, optional
Stops fitting when energy is less than or equal to this value.
"""
self._generate_initial_score()
self._mmc_loop(rounds, max_angle, max_distance, temp=temp,
stop_when=stop_when, verbose=verbose)
return
def _generate_initial_score(self):
"""Runs the evaluation function for the initial pose."""
self.current_energy = self.eval_fn(self.polypeptide, *self.eval_args)
self.best_energy = copy.deepcopy(self.current_energy)
self.best_model = copy.deepcopy(self.polypeptide)
return
def _mmc_loop(self, rounds, max_angle, max_distance,
temp=298.15, stop_when=None, verbose=True):
"""The main Metropolis Monte Carlo loop."""
current_round = 0
while current_round < rounds:
working_model = copy.deepcopy(self.polypeptide)
random_vector = unit_vector(numpy.random.uniform(-1, 1, size=3))
mode = random.choice(['rotate', 'rotate', 'rotate', 'translate'])
if mode == 'rotate':
random_angle = numpy.random.rand() * max_angle
working_model.rotate(random_angle, random_vector,
working_model.centre_of_mass)
else:
random_translation = random_vector * (numpy.random.rand() *
max_distance)
working_model.translate(random_translation)
proposed_energy = self.eval_fn(working_model, *self.eval_args)
move_accepted = self.check_move(proposed_energy,
self.current_energy, t=temp)
            if move_accepted:
                self.current_energy = proposed_energy
                # Keep the accepted pose as the current model; without
                # this, accepted uphill moves would be silently discarded.
                self.polypeptide = working_model
                if self.current_energy < self.best_energy:
                    self.best_energy = copy.deepcopy(self.current_energy)
                    self.best_model = copy.deepcopy(working_model)
if verbose:
sys.stdout.write(
'\rRound: {}, Current RMSD: {}, Proposed RMSD: {} '
'(best {}), {}. '
.format(current_round, self.float_f(self.current_energy),
self.float_f(proposed_energy), self.float_f(
self.best_energy),
"ACCEPTED" if move_accepted else "DECLINED")
)
sys.stdout.flush()
current_round += 1
if stop_when:
if self.best_energy <= stop_when:
break
return
@staticmethod
def float_f(f):
"""Formats a float for printing to std out."""
return '{:.2f}'.format(f)
@staticmethod
def check_move(new, old, t):
"""Determines if a model will be accepted."""
if (t <= 0) or numpy.isclose(t, 0.0):
return False
K_BOLTZ = 1.9872041E-003 # kcal/mol.K
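        # Metropolis criterion: downhill moves are always accepted;
        # uphill moves are accepted with probability exp(-dE / (k_B * T)).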
if new < old:
return True
else:
move_prob = math.exp(-(new - old) / (K_BOLTZ * t))
if move_prob > random.uniform(0, 1):
return True
return False
__author__ = 'Christopher W. Wood'

# --- end of src/ampal/align.py ---
from collections import OrderedDict
from .base_ampal import Atom, Monomer, Polymer, write_pdb
from .geometry import distance, radius_of_circumcircle
class PseudoGroup(Polymer):
"""Container for `PseudoMonomer`, inherits from `Polymer`.
Parameters
----------
monomers : PseudoAtom or [PseudoGroup], optional
`PseudoMonomer` or list containing `PseudoMonomer` objects to form the
`PseudoGroup`.
polymer_id : str, optional
An ID that the user can use to identify the `PseudoGroup`. This is
used when generating a pdb file using `PseudoGroup().pdb`.
parent : ampal.Assembly, optional
Reference to `Assembly` containing the `PseudoGroup`.
sl : int, optional
The default smoothing level used when calculating the
backbone primitive.
Attributes
----------
id : str
`PseudoGroup` ID
parent : ampal.Assembly or None
Reference to `Assembly` containing the `PseudoGroup`
molecule_type : str
A description of the type of `Polymer` i.e. Protein, DNA etc.
ligands : ampal.LigandGroup
A `LigandGroup` containing all the `Ligands` associated with this
`PseudoGroup` chain.
tags : dict
A dictionary containing information about this AMPAL object.
The tags dictionary is used by AMPAL to cache information
about this object, but is also intended to be used by users
to store any relevant information they have.
sl : int
The default smoothing level used when calculating the
backbone primitive.
Raises
------
TypeError
`Polymer` type objects can only be initialised empty or using
a `Monomer`.
"""
def __init__(self, monomers=None, polymer_id=' ', parent=None, sl=2):
super().__init__(
monomers=monomers, polymer_id=polymer_id,
molecule_type='pseudo_group', parent=parent, sl=sl)
def __repr__(self):
return '<PseudoGroup chain containing {} {}>'.format(
len(self._monomers),
'PseudoMonomer' if len(self._monomers) == 1 else 'PseudoMonomers')
class PseudoMonomer(Monomer):
"""Represents a collection of `PsuedoAtoms`.
Parameters
----------
pseudo_atoms : OrderedDict, optional
        OrderedDict containing `PseudoAtoms` for the `PseudoMonomer`. OrderedDict
is used to maintain the order items were added to the
dictionary.
mol_code : str, optional
        One or three letter code that represents the `PseudoMonomer`.
monomer_id : str, optional
        String used to identify the `PseudoMonomer`.
insertion_code : str, optional
        Insertion code of `PseudoMonomer`, used if reading from pdb.
is_hetero : bool, optional
True if is a hetero atom in pdb. Helps with PDB formatting.
parent : ampal.PseudoGroup, optional
        Reference to `PseudoGroup` containing the `PseudoMonomer`.
Attributes
----------
    mol_code : str
        PDB molecule code that represents the `PseudoMonomer`.
    insertion_code : str
        Insertion code of `PseudoMonomer`, used if reading from pdb.
    is_hetero : bool
        True if is a hetero atom in pdb. Helps with PDB formatting.
    states : dict
        Contains an `OrderedDict` of atom information for each
        state available for the `PseudoMonomer`.
    id : str
        String used to identify the `PseudoMonomer`.
    reference_atom : str
        The key that corresponds to the reference `Atom`. This is used
        by various functions, for example backbone primitives are
        calculated using the `Atom` defined using this key.
    parent : PseudoGroup or None
        A reference to the `PseudoGroup` containing this `PseudoMonomer`.
tags : dict
A dictionary containing information about this AMPAL object.
The tags dictionary is used by AMPAL to cache information
about this object, but is also intended to be used by users
to store any relevant information they have.
"""
def __init__(self, pseudo_atoms=None, mol_code='UNK',
monomer_id=' ', insertion_code=' ', parent=None):
super(PseudoMonomer, self).__init__(
atoms=pseudo_atoms, monomer_id=monomer_id,
parent=parent)
self.mol_code = mol_code
self.insertion_code = insertion_code
self.is_hetero = True
def __repr__(self):
return '<PseudoMonomer containing {} {}. PseudoMonomer code: {}>'.format(
len(self.atoms), 'PseudoAtom' if len(self.atoms) == 1 else 'PseudoAtoms', self.mol_code)
@property
def pdb(self):
"""Generates a PDB string for the `PseudoMonomer`."""
pdb_str = write_pdb(
[self], ' ' if not self.tags['chain_id'] else self.tags['chain_id'])
return pdb_str
class PseudoAtom(Atom):
"""Object containing 3D coordinates and name.
Notes
-----
Used to represent pseudo atoms (e.g. centre_of_mass) in ISAMBARD.
Parameters
----------
coordinates : 3D Vector (tuple, list, numpy.array)
Position of `PseudoAtom` in 3D space.
    name : str, optional
        The name of the `PseudoAtom`.
occupancy : float, optional
The occupancy of the `PseudoAtom`.
bfactor : float, optional
The bfactor of the `PseudoAtom`.
charge : str, optional
The point charge of the `PseudoAtom`.
parent : ampal.Monomer, optional
A reference to the `Monomer` containing this `PseudoAtom`.
Attributes
----------
id : str
Identifier for `PseudoAtom`, usually a number.
res_label : str
Label used in `PseudoGroup` to refer to the `Atom` type i.e. "CA" or "OD1".
element : str
Element of `Atom`.
    parent : ampal.PseudoMonomer or None
        A reference to the `PseudoMonomer` containing this `PseudoAtom`.
tags : dict
A dictionary containing information about this AMPAL object.
The tags dictionary is used by AMPAL to cache information
about this object, but is also intended to be used by users
to store any relevant information they have.
"""
def __init__(self, coordinates, name='', occupancy=1.0, bfactor=1.0,
charge=' ', parent=None):
super().__init__(coordinates, element='C', atom_id=' ',
occupancy=occupancy, bfactor=bfactor,
charge=charge, state='A', parent=parent)
self.name = name
def __repr__(self):
return ("<PseudoAtom. Name: {}. Coordinates: "
"({:.3f}, {:.3f}, {:.3f})>".format(
self.name, self.x, self.y, self.z))
class Primitive(PseudoGroup):
"""A backbone path composed of `PseudoAtoms`.
    Parameters
    ----------
    monomers : PseudoMonomer or [PseudoMonomer], optional
        `PseudoMonomer` or list containing `PseudoMonomer` objects to
        form the `Primitive`.
    polymer_id : str, optional
        An ID that the user can use to identify the `Primitive`. This is
        used when generating a pdb file using `Primitive().pdb`.
    parent : ampal.Assembly, optional
        Reference to `Assembly` containing the `Primitive`.
    sl : int, optional
        The default smoothing level used when calculating the
        backbone primitive.
    Attributes
    ----------
    id : str
        `Primitive` ID
    parent : ampal.Assembly or None
        Reference to `Assembly` containing the `Primitive`.
    molecule_type : str
        A description of the type of `Polymer` i.e. Protein, DNA etc.
    tags : dict
        A dictionary containing information about this AMPAL object.
        The tags dictionary is used by AMPAL to cache information
        about this object, but is also intended to be used by users
        to store any relevant information they have.
    sl : int
        The default smoothing level used when calculating the
        backbone primitive.
"""
def __init__(self, monomers=None, polymer_id=' ', parent=None, sl=2):
super().__init__(
monomers=monomers, polymer_id=polymer_id,
parent=parent, sl=sl)
def __repr__(self):
return '<Primitive chain containing {} {}>'.format(
len(self._monomers),
'PseudoMonomer' if len(self._monomers) == 1 else 'PseudoMonomers')
@classmethod
def from_coordinates(cls, coordinates):
"""Creates a `Primitive` from a list of coordinates."""
prim = cls()
for coord in coordinates:
pm = PseudoMonomer(parent=prim)
pa = PseudoAtom(coord, parent=pm)
pm.atoms = OrderedDict([('CA', pa)])
prim.append(pm)
prim.relabel_all()
return prim
@property
def coordinates(self):
"""Returns the backbone coordinates for the `Primitive`."""
return [x._vector for x in self.get_atoms()]
def rise_per_residue(self):
"""The rise per residue at each point on the Primitive.
Notes
-----
Each element of the returned list is the rise per residue,
at a point on the Primitive. Element i is the distance
between primitive[i] and primitive[i + 1]. The final value
is None.
"""
rprs = [distance(self[i]['CA'], self[i + 1]['CA'])
for i in range(len(self) - 1)]
rprs.append(None)
return rprs
def radii_of_curvature(self):
"""The radius of curvature at each point on the Polymer primitive.
Notes
-----
Each element of the returned list is the radius of curvature,
at a point on the Polymer primitive. Element i is the radius
of the circumcircle formed from indices [i-1, i, i+1] of the
        primitive. The first and final values are None.
"""
rocs = []
for i, _ in enumerate(self):
if 0 < i < len(self) - 1:
rocs.append(radius_of_circumcircle(
self[i - 1]['CA'], self[i]['CA'], self[i + 1]['CA']))
else:
rocs.append(None)
return rocs
__author__ = 'Jack W. Heal'

# --- end of src/ampal/pseudo_atoms.py ---
import subprocess
import tempfile
from .assembly import Assembly
def dssp_available():
"""True if mkdssp is available on the path."""
available = False
try:
subprocess.check_output(['mkdssp'], stderr=subprocess.DEVNULL)
except subprocess.CalledProcessError:
available = True
except FileNotFoundError:
print("DSSP has not been found on your path. If you have already "
"installed DSSP but are unsure how to add it to your path, "
"check out this: https://stackoverflow.com/a/14638025")
return available
def run_dssp(pdb, path=True):
"""Uses DSSP to find helices and extracts helices from a pdb file or string.
Parameters
----------
pdb : str
Path to pdb file or string.
path : bool, optional
Indicates if pdb is a path or a string.
Returns
-------
dssp_out : str
Std out from DSSP.
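    Examples
    --------
    A usage sketch; requires `mkdssp` to be available on the path
    (`my_assembly` is a placeholder `Assembly`):
    >>> dssp_out = run_dssp('1ek9.pdb')  # doctest: +SKIP
    >>> dssp_out = run_dssp(my_assembly.pdb, path=False)  # doctest: +SKIP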
"""
if not path:
if isinstance(pdb, str):
pdb = pdb.encode()
with tempfile.NamedTemporaryFile() as temp_pdb:
temp_pdb.write(pdb)
temp_pdb.seek(0)
dssp_out = subprocess.check_output(
['mkdssp', temp_pdb.name])
else:
dssp_out = subprocess.check_output(
['mkdssp', pdb])
dssp_out = dssp_out.decode()
return dssp_out
def extract_all_ss_dssp(in_dssp, path=True):
"""Uses DSSP to extract secondary structure information on every residue.
Parameters
----------
in_dssp : str
Path to DSSP file.
path : bool, optional
Indicates if pdb is a path or a string.
Returns
-------
dssp_residues : [tuple]
        Each tuple contains:
[0] int Residue number
[1] str Secondary structure type
[2] str Chain identifier
[3] str Residue type
[4] float Phi torsion angle
[5] float Psi torsion angle
[6] int dssp solvent accessibility
"""
if path:
with open(in_dssp, 'r') as inf:
dssp_out = inf.read()
else:
dssp_out = in_dssp[:]
dssp_residues = []
active = False
for line in dssp_out.splitlines():
if active:
try:
res_num = int(line[5:10].strip())
chain = line[10:12].strip()
residue = line[13]
ss_type = line[16]
phi = float(line[103:109].strip())
psi = float(line[109:116].strip())
acc = int(line[35:38].strip())
dssp_residues.append(
(res_num, ss_type, chain, residue, phi, psi, acc))
except ValueError:
pass
else:
if line[2] == '#':
active = True
return dssp_residues
def find_ss_regions(dssp_residues, loop_assignments=(' ', 'B', 'S', 'T')):
"""Separates parsed DSSP data into groups of secondary structure.
Notes
-----
Example: all residues in a single helix/loop/strand will be gathered
into a list, then the next secondary structure element will be
gathered into a separate list, and so on.
Parameters
----------
dssp_residues : [tuple]
        Each tuple contains:
[0] int Residue number
[1] str Secondary structure type
[2] str Chain identifier
[3] str Residue type
[4] float Phi torsion angle
[5] float Psi torsion angle
[6] int dssp solvent accessibility
Returns
-------
    fragments : [[tuple]]
        Lists of tuples grouped into continuous regions of secondary
        structure. The tuples have the same format as above.
"""
loops = loop_assignments
previous_ele = None
fragment = []
fragments = []
for ele in dssp_residues:
if previous_ele is None:
fragment.append(ele)
elif ele[2] != previous_ele[2]:
fragments.append(fragment)
fragment = [ele]
elif previous_ele[1] in loops:
if ele[1] in loops:
fragment.append(ele)
else:
fragments.append(fragment)
fragment = [ele]
else:
if ele[1] == previous_ele[1]:
fragment.append(ele)
else:
fragments.append(fragment)
fragment = [ele]
previous_ele = ele
fragments.append(fragment)
return fragments
def tag_dssp_data(assembly, loop_assignments=(' ', 'B', 'S', 'T')):
"""Adds output data from DSSP to an Assembly.
A dictionary will be added to the `tags` dictionary of each
residue called `dssp_data`, which contains the secondary
structure definition, solvent accessibility phi and psi values
from DSSP. A list of regions of continuous secondary assignments
will also be added to each `Polypeptide`.
The tags are added in place, so nothing is returned from this
function.
Parameters
----------
assembly : ampal.Assembly
An Assembly containing some protein.
loop_assignments : tuple or list
        A tuple containing the DSSP secondary structure identifiers
        that are classed as loop regions.
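    Examples
    --------
    A usage sketch; requires `mkdssp` on the path and a protein-containing
    `Assembly` (here the placeholder `my_assembly`):
    >>> tag_dssp_data(my_assembly)  # doctest: +SKIP
    >>> my_assembly[0][0].tags['dssp_data']['ss_definition']  # doctest: +SKIP
    'H'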
"""
dssp_out = run_dssp(assembly.pdb, path=False)
dssp_data = extract_all_ss_dssp(dssp_out, path=False)
for record in dssp_data:
rnum, sstype, chid, _, phi, psi, sacc = record
assembly[chid][str(rnum)].tags['dssp_data'] = {
'ss_definition': sstype,
'solvent_accessibility': sacc,
'phi': phi,
'psi': psi
}
ss_regions = find_ss_regions(dssp_data, loop_assignments)
for region in ss_regions:
chain = region[0][2]
ss_type = ' ' if region[0][1] in loop_assignments else region[0][1]
first_residue = str(region[0][0])
last_residue = str(region[-1][0])
        if 'ss_regions' not in assembly[chain].tags:
assembly[chain].tags['ss_regions'] = []
assembly[chain].tags['ss_regions'].append(
(first_residue, last_residue, ss_type))
return
def get_ss_regions(assembly, ss_types):
"""Returns an Assembly containing Polymers for each region of structure.
Parameters
----------
assembly : ampal.Assembly
`Assembly` object to be searched secondary structure regions.
ss_types : list
        List of secondary structure tags to be separated, i.e. ['H']
would return helices, ['H', 'E'] would return helices
and strands.
Returns
-------
fragments : Assembly
`Assembly` containing a `Polymer` for each region of specified
secondary structure.
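    Examples
    --------
    A usage sketch; `my_assembly` must first be tagged with `tag_dssp_data`:
    >>> helices = get_ss_regions(my_assembly, ['H'])  # doctest: +SKIP
    >>> helices_and_strands = get_ss_regions(my_assembly, ['H', 'E'])  # doctest: +SKIP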
"""
if not any(map(lambda x: 'ss_regions' in x.tags, assembly)):
raise ValueError(
'This assembly does not have any tagged secondary structure '
'regions. Use `ampal.dssp.tag_dssp_data` to add the tags.'
)
fragments = Assembly()
for polypeptide in assembly:
if 'ss_regions' in polypeptide.tags:
for start, end, ss_type in polypeptide.tags['ss_regions']:
if ss_type in ss_types:
fragment = polypeptide.get_slice_from_res_id(start, end)
fragments.append(fragment)
if not fragments:
raise ValueError('No regions matching that secondary structure type'
' have been found. Use standard DSSP labels.')
return fragments
__author__ = "Christopher W. Wood, Gail J. Bartlett" | AMPAL | /AMPAL-1.4.0.tar.gz/AMPAL-1.4.0/src/ampal/dssp.py | dssp.py |
from collections import OrderedDict
import warnings
import numpy
from ampal.base_ampal import Polymer, Monomer, Atom
from ampal.pseudo_atoms import Primitive
from ampal.analyse_protein import (
make_primitive_extrapolate_ends, measure_torsion_angles, residues_per_turn,
polymer_to_reference_axis_distances, crick_angles, alpha_angles,
sequence_molecular_weight, sequence_molar_extinction_280,
sequence_isoelectric_point, measure_sidechain_torsion_angles)
from ampal.interactions import (
generate_covalent_bond_graph, generate_bond_subgraphs_from_break,
find_covalent_bonds)
from .amino_acids import (
get_aa_code, get_aa_letter, ideal_backbone_bond_lengths,
ideal_backbone_bond_angles)
from .geometry import (
Quaternion, unit_vector, dihedral, find_transformations, distance,
angle_between_vectors)
from .ampal_warnings import MalformedPDBWarning
def flat_list_to_polymer(atom_list, atom_group_s=4):
"""Takes a flat list of atomic coordinates and converts it to a `Polymer`.
Parameters
----------
    atom_list : list
        Flat list of 3D coordinates (tuple, list or numpy.array).
atom_group_s : int, optional
Size of atom groups.
Returns
-------
polymer : Polypeptide
`Polymer` object containing atom coords converted `Monomers`.
Raises
------
ValueError
Raised if `atom_group_s` != 4 or 5
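    Examples
    --------
    A minimal sketch building a single glycine-like residue from four
    backbone coordinates (the values are arbitrary):
    >>> coords = [(0.0, 0.0, 0.0), (1.5, 0.0, 0.0),
    ...           (2.0, 1.4, 0.0), (3.2, 1.4, 0.5)]
    >>> polymer = flat_list_to_polymer(coords, atom_group_s=4)
    >>> polymer.sequence
    'G'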
"""
atom_labels = ['N', 'CA', 'C', 'O', 'CB']
atom_elements = ['N', 'C', 'C', 'O', 'C']
atoms_coords = [atom_list[x:x + atom_group_s]
for x in range(0, len(atom_list), atom_group_s)]
atoms = [[Atom(x[0], x[1]) for x in zip(y, atom_elements)]
for y in atoms_coords]
if atom_group_s == 5:
monomers = [Residue(OrderedDict(zip(atom_labels, x)), 'ALA')
for x in atoms]
elif atom_group_s == 4:
monomers = [Residue(OrderedDict(zip(atom_labels, x)), 'GLY')
for x in atoms]
else:
raise ValueError(
'Parameter atom_group_s must be 4 or 5 so atoms can be labeled correctly.')
polymer = Polypeptide(monomers=monomers)
return polymer
def flat_list_to_dummy_chain(atom_list, atom_group_s=1):
"""Converts flat list of coordinates into dummy C-alpha carbons
Parameters
----------
    atom_list : list
        Flat list of 3D coordinates (tuple, list or numpy.array).
atom_group_s : int, optional
Size of atom groups.
Returns
-------
polymer : Polypeptide
`Polymer` object containing atom coord converted `Monomers`
with 'DUM' atom name.
"""
atom_labels = ['CA']
atom_elements = ['C']
atoms_coords = [atom_list[x:x + atom_group_s]
for x in range(0, len(atom_list), atom_group_s)]
atoms = [[Atom(x[0], x[1]) for x in zip(y, atom_elements)]
for y in atoms_coords]
monomers = [Residue(OrderedDict(zip(atom_labels, x)), 'DUM')
for x in atoms]
polymer = Polypeptide(monomers=monomers)
return polymer
def align(target, mobile, target_i=0, mobile_i=0):
"""Aligns one Polypeptide (mobile) to another (target).
Notes
-----
This function directly modifies atoms of the mobile Polypeptide!
It does not return a new object.
Parameters
----------
target : Polypeptide
Polypeptide to be aligned to.
mobile : Polypeptide
Polypeptide to be moved during alignment.
target_i : int, optional
Index of `Residue` in target to align to.
mobile_i : int, optional
Index of `Residue` in mobile to be aligned.
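    Examples
    --------
    A usage sketch; both arguments are placeholder `Polypeptide` objects:
    >>> align(target_pp, mobile_pp, target_i=0, mobile_i=0)  # doctest: +SKIP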
"""
# First, align N->CA vectors.
s1, e1, s2, e2 = [x._vector
for x in [mobile[mobile_i]['N'], mobile[mobile_i]['CA'],
target[target_i]['N'], target[target_i]['CA']]]
translation, angle, axis, point = find_transformations(
s1, e1, s2, e2, radians=False)
# Rotation first, Then translation.
mobile.rotate(angle=angle, axis=axis, point=point, radians=False)
mobile.translate(vector=translation)
# Second, rotate about N->CA axis to align CA->C vectors.
angle = dihedral(mobile[mobile_i]['C'], mobile[mobile_i]
['N'], mobile[mobile_i]['CA'], target[target_i]['C'])
axis = target[target_i]['CA'] - target[target_i]['N']
point = target[target_i]['N']._vector
mobile.rotate(angle=angle, axis=axis, point=point)
return
class Polypeptide(Polymer):
"""Container for `Residues`, inherits from `Polymer`.
Parameters
----------
monomers : Residue or [Residue], optional
`Residue` or list containing `Residue` objects to form the
`Polypeptide`.
polymer_id : str, optional
An ID that the user can use to identify the `Polypeptide`. This is
used when generating a pdb file using `Polypeptide().pdb`.
parent : ampal.Assembly, optional
Reference to `Assembly` containing the `Polymer`.
sl : int, optional
The default smoothing level used when calculating the
backbone primitive.
Attributes
----------
id : str
`Polypeptide` ID
parent : ampal.Assembly or None
Reference to `Assembly` containing the `Polypeptide`
molecule_type : str
A description of the type of `Polymer` i.e. Protein, DNA etc.
ligands : ampal.LigandGroup
A `LigandGroup` containing all the `Ligands` associated with this
`Polypeptide` chain.
tags : dict
A dictionary containing information about this AMPAL object.
The tags dictionary is used by AMPAL to cache information
about this object, but is also intended to be used by users
to store any relevant information they have.
sl : int
The default smoothing level used when calculating the
backbone primitive.
Raises
------
TypeError
`Polymer` type objects can only be initialised empty or using
a `Monomer`.
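    Examples
    --------
    A minimal sketch; `res_1` and `res_2` stand in for previously
    constructed `Residue` objects:
    >>> pp = Polypeptide([res_1, res_2], polymer_id='A')  # doctest: +SKIP
    >>> pp.sequence  # doctest: +SKIP
    'AG'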
"""
def __init__(self, monomers=None, polymer_id=' ', parent=None, sl=2):
super().__init__(
monomers=monomers, polymer_id=polymer_id, molecule_type='protein',
parent=parent, sl=sl)
def __add__(self, other):
if isinstance(other, Polymer):
merged_polymer = self._monomers + other._monomers
else:
raise TypeError(
'Only Polymer objects may be merged with a Polymer.')
return Polypeptide(monomers=merged_polymer, polymer_id=self.id)
def __getitem__(self, item):
if isinstance(item, str):
id_dict = {str(m.id): m for m in self._monomers}
return id_dict[item]
elif isinstance(item, int):
return self._monomers[item]
return Polypeptide(self._monomers[item], polymer_id=self.id)
def __repr__(self):
if len(self.sequence) > 15:
seq = self.sequence[:12] + '...'
else:
seq = self.sequence
return '<Polypeptide containing {} {}. Sequence: {}>'.format(
len(self._monomers),
'Residue' if len(self._monomers) == 1 else 'Residues', seq)
def get_slice_from_res_id(self, start, end):
"""Returns a new `Polypeptide` containing the `Residues` in start/end range.
Parameters
----------
start : str
string representing start residue id (PDB numbering)
end : str
string representing end residue id (PDB numbering)
Returns
-------
slice_polymer : Polymer
Polymer containing the residue range specified by start-end
"""
id_dict = {str(m.id): m for m in self._monomers}
slice_polymer = Polypeptide(
[id_dict[str(x)] for x in range(int(start), int(end) + 1)], self.id)
return slice_polymer
@property
def backbone(self):
"""Returns a new `Polymer` containing only the backbone atoms.
Notes
-----
Metadata is not currently preserved from the parent object.
Sequence data is retained, but only the main chain atoms are retained.
Returns
-------
bb_poly : Polypeptide
Polymer containing only the backbone atoms of the original
Polymer.
"""
bb_poly = Polypeptide([x.backbone for x in self._monomers], self.id)
return bb_poly
@property
def primitive(self):
"""Primitive of the backbone.
Notes
-----
This is the average of the positions of all the CAs in frames
of `sl` `Residues`.
"""
cas = self.get_reference_coords()
primitive_coords = make_primitive_extrapolate_ends(
cas, smoothing_level=self.sl)
primitive = Primitive.from_coordinates(primitive_coords)
primitive.relabel_monomers([x.id for x in self])
primitive.id = self.id
primitive.parent = self
return primitive
@property
def fasta(self):
"""Generates sequence data for the protein in FASTA format."""
max_line_length = 79
fasta_str = '>{0}:{1}|PDBID|CHAIN|SEQUENCE\n'.format(
self.parent.id.upper(), self.id)
seq = self.sequence
split_seq = [seq[i: i + max_line_length]
for i in range(0, len(seq), max_line_length)]
for seq_part in split_seq:
fasta_str += '{0}\n'.format(seq_part)
return fasta_str
@property
def sequence(self):
"""Returns the sequence of the `Polymer` as a string.
Returns
-------
sequence : str
String of the `Residue` sequence of the `Polypeptide`.
"""
seq = [x.mol_letter for x in self._monomers]
return ''.join(seq)
@property
def molecular_weight(self):
"""Returns the molecular weight of the `Assembly` in Daltons."""
return sequence_molecular_weight(self.sequence)
@property
def molar_extinction_280(self):
"""Returns the extinction co-efficient of the `Assembly` at 280 nm."""
return sequence_molar_extinction_280(self.sequence)
@property
def isoelectric_point(self):
"""Returns the isoelectric point of the `Assembly`."""
return sequence_isoelectric_point(self.sequence)
@property
def backbone_bond_lengths(self):
"""Dictionary containing backbone bond lengths as lists of floats.
Returns
-------
bond_lengths : dict
Keys are `n_ca`, `ca_c`, `c_o` and `c_n`, referring to the
N-CA, CA-C, C=O and C-N bonds respectively. Values are
lists of floats : the bond lengths in Angstroms.
The lists of n_ca, ca_c and c_o are of length k for
a Polypeptide containing k Residues. The list of c_n bonds
is of length k-1 for a Polypeptide containing k Residues
(C-N formed between successive `Residue` pairs).
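        Examples
        --------
        A sketch for a placeholder four-`Residue` `Polypeptide` `pp`
        (values are illustrative):
        >>> pp.backbone_bond_lengths['c_n']  # doctest: +SKIP
        [1.32, 1.33, 1.32]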
"""
bond_lengths = dict(
n_ca=[distance(r['N'], r['CA'])
for r in self.get_monomers(ligands=False)],
ca_c=[distance(r['CA'], r['C'])
for r in self.get_monomers(ligands=False)],
c_o=[distance(r['C'], r['O'])
for r in self.get_monomers(ligands=False)],
c_n=[distance(r1['C'], r2['N']) for r1, r2 in [
(self[i], self[i + 1]) for i in range(len(self) - 1)]],
)
return bond_lengths
@property
def backbone_bond_angles(self):
"""Dictionary containing backbone bond angles as lists of floats.
Returns
-------
bond_angles : dict
Keys are `n_ca_c`, `ca_c_o`, `ca_c_n` and `c_n_ca`, referring
to the N-CA-C, CA-C=O, CA-C-N and C-N-CA angles respectively.
Values are lists of floats : the bond angles in degrees.
The lists of n_ca_c, ca_c_o are of length k for a `Polypeptide`
containing k `Residues`. The list of ca_c_n and c_n_ca are of
length k-1 for a `Polypeptide` containing k `Residues` (These
angles are across the peptide bond, and are therefore formed
between successive `Residue` pairs).
"""
bond_angles = dict(
n_ca_c=[angle_between_vectors(r['N'] - r['CA'], r['C'] - r['CA'])
for r in self.get_monomers(ligands=False)],
ca_c_o=[angle_between_vectors(r['CA'] - r['C'], r['O'] - r['C'])
for r in self.get_monomers(ligands=False)],
ca_c_n=[angle_between_vectors(r1['CA'] - r1['C'], r2['N'] - r1['C'])
for r1, r2 in [(self[i], self[i + 1]) for i in range(len(self) - 1)]],
c_n_ca=[angle_between_vectors(r1['C'] - r2['N'], r2['CA'] - r2['N'])
for r1, r2 in [(self[i], self[i + 1]) for i in range(len(self) - 1)]],
)
return bond_angles
def c_join(self, other, psi=-40.76, omega=-178.25, phi=-65.07,
o_c_n_angle=None, c_n_ca_angle=None, c_n_length=None,
relabel=True):
"""Joins other to self at the C-terminus via a peptide bond.
Notes
-----
This function directly modifies self. It does not return a new object.
Parameters
----------
other: Residue or Polypeptide
psi: float, optional
Psi torsion angle (degrees) between final `Residue` of self
and first `Residue` of other.
omega: float, optional
Omega torsion angle (degrees) between final `Residue` of
self and first `Residue` of other.
phi: float, optional
Phi torsion angle (degrees) between final `Residue` of self
and first `Residue` of other.
o_c_n_angle: float or None, optional
Desired angle between O, C (final `Residue` of self) and N
(first `Residue` of other) atoms. If `None`, default value is
taken from `ideal_backbone_bond_angles`.
c_n_ca_angle: float or None, optional
Desired angle between C (final `Residue` of self) and N, CA
(first `Residue` of other) atoms. If `None`, default value is
taken from `ideal_backbone_bond_angles`.
c_n_length: float or None, optional
Desired peptide bond length between final `Residue` of self
and first `Residue` of other. If `None`, default value is taken
from `ideal_backbone_bond_lengths`.
relabel: bool, optional
If `True`, `relabel_all` is run on self before returning.
Raises
------
TypeError:
If other is not a `Residue` or a Polypeptide.
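        Examples
        --------
        A usage sketch extending a placeholder `Polypeptide` `pp` with a
        placeholder `Residue` `new_res`, using the default torsion angles:
        >>> pp.c_join(new_res)  # doctest: +SKIP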
"""
if isinstance(other, Residue):
other = Polypeptide([other])
if not isinstance(other, Polypeptide):
raise TypeError(
'Only Polypeptide or Residue objects can be joined to a Polypeptide')
if abs(omega) >= 90:
peptide_conformation = 'trans'
else:
peptide_conformation = 'cis'
if o_c_n_angle is None:
o_c_n_angle = ideal_backbone_bond_angles[peptide_conformation]['o_c_n']
if c_n_ca_angle is None:
c_n_ca_angle = ideal_backbone_bond_angles[peptide_conformation]['c_n_ca']
if c_n_length is None:
c_n_length = ideal_backbone_bond_lengths['c_n']
r1 = self[-1]
r1_ca = r1['CA']._vector
r1_c = r1['C']._vector
r1_o = r1['O']._vector
        # p1 is the point used to position the N atom of the first residue of other.
p1 = r1_o[:]
# rotate p1 by o_c_n_angle, about axis perpendicular to the
# r1_ca, r1_c, r1_o plane, passing through r1_c.
axis = numpy.cross((r1_ca - r1_c), (r1_o - r1_c))
q = Quaternion.angle_and_axis(angle=o_c_n_angle, axis=axis)
p1 = q.rotate_vector(v=p1, point=r1_c)
# Ensure p1 is separated from r1_c by the correct distance.
p1 = r1_c + (c_n_length * unit_vector(p1 - r1_c))
        # rotate p1 and r1['O'] to obtain the desired psi value at the join.
measured_psi = dihedral(r1['N'], r1['CA'], r1['C'], p1)
q = Quaternion.angle_and_axis(
angle=(psi - measured_psi), axis=(r1_c - r1_ca))
p1 = q.rotate_vector(v=p1, point=r1_c)
r1['O']._vector = q.rotate_vector(v=r1_o, point=r1_c)
# translate other so that its first N atom is at p1
other.translate(vector=(p1 - other[0]['N']._vector))
# rotate other so that c_n_ca angle is correct.
v1 = r1_c - other[0]['N']._vector
v2 = other[0]['CA']._vector - other[0]['N']._vector
measured_c_n_ca = angle_between_vectors(v1, v2)
axis = numpy.cross(v1, v2)
other.rotate(angle=(c_n_ca_angle - measured_c_n_ca),
axis=axis, point=other[0]['N']._vector)
# rotate other to obtain desired omega and phi values at the join
measured_omega = dihedral(
r1['CA'], r1['C'], other[0]['N'], other[0]['CA'])
other.rotate(angle=(omega - measured_omega),
axis=(other[0]['N'] - r1['C']), point=other[0]['N']._vector)
measured_phi = dihedral(
r1['C'], other[0]['N'], other[0]['CA'], other[0]['C'])
other.rotate(angle=(phi - measured_phi),
axis=(other[0]['CA'] - other[0]['N']), point=other[0]['CA']._vector)
self.extend(other)
if relabel:
self.relabel_all()
self.tags['assigned_ff'] = False
return
def n_join(self, other, psi=-40.76, omega=-178.25, phi=-65.07,
o_c_n_angle=None, c_n_ca_angle=None, c_n_length=None, relabel=True):
"""Joins other to self at the N-terminus via a peptide bond.
Notes
-----
This function directly modifies self. It does not return a new object.
Parameters
----------
other: Residue or Polypeptide
psi: float
Psi torsion angle (degrees) between final `Residue` of other
and first `Residue` of self.
omega: float
Omega torsion angle (degrees) between final `Residue` of
other and first `Residue` of self.
phi: float
Phi torsion angle (degrees) between final `Residue` of other
and first `Residue` of self.
o_c_n_angle: float or None
Desired angle between O, C (final `Residue` of other) and N
(first `Residue` of self) atoms. If `None`, default value is
taken from `ideal_backbone_bond_angles`.
c_n_ca_angle: float or None
Desired angle between C (final `Residue` of other) and N, CA
(first `Residue` of self) atoms. If `None`, default value is taken
from `ideal_backbone_bond_angles`.
c_n_length: float or None
Desired peptide bond length between final `Residue` of other
and first `Residue` of self. If None, default value is taken
from ideal_backbone_bond_lengths.
relabel: bool
If True, relabel_all is run on self before returning.
Raises
------
TypeError:
If other is not a `Residue` or a `Polypeptide`
"""
if isinstance(other, Residue):
other = Polypeptide([other])
if not isinstance(other, Polypeptide):
raise TypeError(
'Only Polypeptide or Residue objects can be joined to a Polypeptide')
if abs(omega) >= 90:
peptide_conformation = 'trans'
else:
peptide_conformation = 'cis'
if o_c_n_angle is None:
o_c_n_angle = ideal_backbone_bond_angles[peptide_conformation]['o_c_n']
if c_n_ca_angle is None:
c_n_ca_angle = ideal_backbone_bond_angles[peptide_conformation]['c_n_ca']
if c_n_length is None:
c_n_length = ideal_backbone_bond_lengths['c_n']
r1 = self[0]
r1_n = r1['N']._vector
r1_ca = r1['CA']._vector
r1_c = r1['C']._vector
        # p1 is the point used to position the C atom of the final residue of other.
p1 = r1_ca[:]
# rotate p1 by c_n_ca_angle, about axis perpendicular to the
# r1_n, r1_ca, r1_c plane, passing through r1_ca.
axis = numpy.cross((r1_ca - r1_n), (r1_c - r1_n))
q = Quaternion.angle_and_axis(angle=c_n_ca_angle, axis=axis)
p1 = q.rotate_vector(v=p1, point=r1_n)
# Ensure p1 is separated from r1_n by the correct distance.
p1 = r1_n + (c_n_length * unit_vector(p1 - r1_n))
# translate other so that its final C atom is at p1
other.translate(vector=(p1 - other[-1]['C']._vector))
# Force CA-C=O-N to be in a plane, and fix O=C-N angle accordingly
measured_dihedral = dihedral(
other[-1]['CA'], other[-1]['C'], other[-1]['O'], r1['N'])
desired_dihedral = 180.0
axis = other[-1]['O'] - other[-1]['C']
other.rotate(angle=(measured_dihedral - desired_dihedral),
axis=axis, point=other[-1]['C']._vector)
axis = (numpy.cross(other[-1]['O'] - other[-1]
['C'], r1['N'] - other[-1]['C']))
measured_o_c_n = angle_between_vectors(
other[-1]['O'] - other[-1]['C'], r1['N'] - other[-1]['C'])
other.rotate(angle=(measured_o_c_n - o_c_n_angle),
axis=axis, point=other[-1]['C']._vector)
# rotate other to obtain desired phi, omega, psi values at the join.
measured_phi = dihedral(other[-1]['C'], r1['N'], r1['CA'], r1['C'])
other.rotate(angle=(phi - measured_phi),
axis=(r1_n - r1_ca), point=r1_ca)
measured_omega = dihedral(
other[-1]['CA'], other[-1]['C'], r1['N'], r1['CA'])
other.rotate(angle=(measured_omega - omega),
axis=(r1['N'] - other[-1]['C']), point=r1_n)
measured_psi = dihedral(
other[-1]['N'], other[-1]['CA'], other[-1]['C'], r1['N'])
other.rotate(angle=-(measured_psi - psi), axis=(other[-1]['CA'] - other[-1]['C']),
point=other[-1]['CA']._vector)
self._monomers = other._monomers + self._monomers
if relabel:
self.relabel_all()
self.tags['assigned_ff'] = False
return
def tag_sidechain_dihedrals(self, force=False):
"""Tags each monomer with side-chain dihedral angles
force: bool, optional
If `True` the tag will be run even if `Residues` are
already tagged.
"""
tagged = ['chi_angles' in x.tags.keys() for x in self._monomers]
if (not all(tagged)) or force:
for monomer in self._monomers:
chi_angles = measure_sidechain_torsion_angles(
monomer, verbose=False)
monomer.tags['chi_angles'] = chi_angles
return
def tag_torsion_angles(self, force=False):
"""Tags each Monomer of the Polymer with its omega, phi and psi torsion angle.
Parameters
----------
force : bool, optional
If `True` the tag will be run even if `Residues` are
already tagged.
"""
tagged = ['omega' in x.tags.keys() for x in self._monomers]
if (not all(tagged)) or force:
tas = measure_torsion_angles(self._monomers)
for monomer, (omega, phi, psi) in zip(self._monomers, tas):
monomer.tags['omega'] = omega
monomer.tags['phi'] = phi
monomer.tags['psi'] = psi
monomer.tags['tas'] = (omega, phi, psi)
return
def rise_per_residue(self):
"""List of rise per residue values along the `Polypeptide`.
Notes
-----
Calculated from `Polypeptide.primitive`."""
return self.primitive.rise_per_residue()
def radii_of_curvature(self):
""" List of radius of curvature values along the `Polypeptide`."""
return self.primitive.radii_of_curvature()
def tag_ca_geometry(self, force=False, reference_axis=None,
reference_axis_name='ref_axis'):
"""Tags each `Residue` with rise_per_residue, radius_of_curvature and residues_per_turn.
Parameters
----------
force : bool, optional
If `True` the tag will be run even if `Residues` are already
tagged.
reference_axis : list(numpy.array or tuple or list), optional
Coordinates to feed to geometry functions that depend on
having a reference axis.
reference_axis_name : str, optional
Used to name the keys in tags at `Polypeptide` and `Residue` level.
"""
tagged = ['rise_per_residue' in x.tags.keys() for x in self._monomers]
if (not all(tagged)) or force:
# Assign tags None if Polymer is too short to have a primitive.
if len(self) < 7:
rprs = [None] * len(self)
rocs = [None] * len(self)
rpts = [None] * len(self)
else:
rprs = self.rise_per_residue()
rocs = self.radii_of_curvature()
rpts = residues_per_turn(self)
for monomer, rpr, roc, rpt in zip(self._monomers, rprs, rocs, rpts):
monomer.tags['rise_per_residue'] = rpr
monomer.tags['radius_of_curvature'] = roc
monomer.tags['residues_per_turn'] = rpt
# Functions that require a reference_axis.
if (reference_axis is not None) and (len(reference_axis) == len(self)):
# Set up arguments to pass to functions.
ref_axis_args = dict(p=self,
reference_axis=reference_axis,
tag=True,
reference_axis_name=reference_axis_name)
# Run the functions.
polymer_to_reference_axis_distances(**ref_axis_args)
crick_angles(**ref_axis_args)
alpha_angles(**ref_axis_args)
return
def valid_backbone_bond_lengths(self, atol=0.1):
"""True if all backbone bonds are within atol Angstroms of the expected distance.
Notes
-----
Ideal bond lengths taken from [1].
References
----------
.. [1] Schulz, G. E, and R. Heiner Schirmer. Principles Of
Protein Structure. New York: Springer-Verlag, 1979.
Parameters
----------
atol : float, optional
            Tolerance value in Angstroms for the absolute deviation
away from ideal backbone bond lengths.
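        Examples
        --------
        A sketch for a placeholder `Polypeptide` `pp`:
        >>> pp.valid_backbone_bond_lengths(atol=0.1)  # doctest: +SKIP
        True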
"""
bond_lengths = self.backbone_bond_lengths
a1 = numpy.allclose(bond_lengths['n_ca'],
[ideal_backbone_bond_lengths['n_ca']] * len(self),
atol=atol)
a2 = numpy.allclose(bond_lengths['ca_c'],
[ideal_backbone_bond_lengths['ca_c']] * len(self),
atol=atol)
a3 = numpy.allclose(bond_lengths['c_o'],
[ideal_backbone_bond_lengths['c_o']] * len(self),
atol=atol)
a4 = numpy.allclose(bond_lengths['c_n'],
[ideal_backbone_bond_lengths['c_n']] *
(len(self) - 1),
atol=atol)
return all([a1, a2, a3, a4])
def valid_backbone_bond_angles(self, atol=20):
"""True if all backbone bond angles are within atol degrees of their expected values.
Notes
-----
Ideal bond angles taken from [1].
References
----------
.. [1] Schulz, G. E, and R. Heiner Schirmer. Principles Of
Protein Structure. New York: Springer-Verlag, 1979.
Parameters
----------
atol : float, optional
Tolerance value in degrees for the absolute deviation
away from ideal backbone bond angles.
"""
bond_angles = self.backbone_bond_angles
omegas = [x[0] for x in measure_torsion_angles(self)]
trans = ['trans' if (omega is None) or (
abs(omega) >= 90) else 'cis' for omega in omegas]
ideal_n_ca_c = [ideal_backbone_bond_angles[x]['n_ca_c'] for x in trans]
ideal_ca_c_o = [ideal_backbone_bond_angles[trans[i + 1]]
['ca_c_o'] for i in range(len(trans) - 1)]
ideal_ca_c_o.append(ideal_backbone_bond_angles['trans']['ca_c_o'])
ideal_ca_c_n = [ideal_backbone_bond_angles[x]['ca_c_n']
for x in trans[1:]]
ideal_c_n_ca = [ideal_backbone_bond_angles[x]['c_n_ca']
for x in trans[1:]]
a1 = numpy.allclose(bond_angles['n_ca_c'], [ideal_n_ca_c], atol=atol)
a2 = numpy.allclose(bond_angles['ca_c_o'], [ideal_ca_c_o], atol=atol)
a3 = numpy.allclose(bond_angles['ca_c_n'], [ideal_ca_c_n], atol=atol)
a4 = numpy.allclose(bond_angles['c_n_ca'], [ideal_c_n_ca], atol=atol)
return all([a1, a2, a3, a4])
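
# Illustrative usage sketch: validating backbone geometry chain by chain.
# Assumptions for the example: a local PDB file path is supplied, the
# structure is parsed with `ampal.load_pdb`, and the returned `Assembly`
# iterates over its chains. Not part of the original module.
def _example_validate_backbone(pdb_path):
    """Sketch only: report whether each chain has near-ideal geometry."""
    import ampal
    assembly = ampal.load_pdb(pdb_path)
    for chain in assembly:
        if not hasattr(chain, 'valid_backbone_bond_lengths'):
            continue  # skip non-polypeptide chains
        print('{}: bond lengths ok: {}, bond angles ok: {}'.format(
            chain.id,
            chain.valid_backbone_bond_lengths(atol=0.1),
            chain.valid_backbone_bond_angles(atol=20)))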
class Residue(Monomer):
"""Represents a amino acid `Residue`.
Parameters
----------
atoms : OrderedDict, optional
OrderedDict containing Atoms for the Monomer. OrderedDict
is used to maintain the order items were added to the
dictionary.
mol_code : str, optional
One or three letter code that represents the monomer.
monomer_id : str, optional
String used to identify the residue.
insertion_code : str, optional
Insertion code of monomer, used if reading from pdb.
is_hetero : bool, optional
True if is a hetero atom in pdb. Helps with PDB formatting.
parent : ampal.Polypeptide, optional
Reference to `Polypeptide` containing the `Residue`.
Attributes
----------
mol_code : str
PDB molecule code that represents the `Residue`.
insertion_code : str
Insertion code of `Residue`, used if reading from pdb.
is_hetero : bool
True if is a hetero atom in pdb. Helps with PDB formatting.
states : dict
Contains an `OrderedDict` of atom information for each
state available for the `Residue`.
id : str
String used to identify the residue.
reference_atom : str
The key that corresponds to the reference atom. This is used
by various functions, for example backbone primitives are
calculated using the atom defined using this key.
parent : Polypeptide or None
A reference to the `Polypeptide` containing this `Residue`.
tags : dict
A dictionary containing information about this AMPAL object.
The tags dictionary is used by AMPAL to cache information
about this object, but is also intended to be used by users
to store any relevant information they have.
Raises
------
ValueError
Raised if `mol_code` is not length 1 or 3.
"""
def __init__(self, atoms=None, mol_code='UNK', monomer_id=' ',
insertion_code=' ', is_hetero=False, parent=None):
super(Residue, self).__init__(
atoms, monomer_id, parent=parent)
if len(mol_code) == 3:
self.mol_code = mol_code
self.mol_letter = get_aa_letter(mol_code)
elif len(mol_code) == 1:
self.mol_code = get_aa_code(mol_code)
self.mol_letter = mol_code
else:
raise ValueError(
'Monomer requires either a 1-letter or a 3-letter '
'amino acid code ({})'.format(mol_code))
self.insertion_code = insertion_code
self.is_hetero = is_hetero
self.reference_atom = 'CA'
def __repr__(self):
return '<Residue containing {} {}. Residue code: {}>'.format(
len(self.atoms), 'Atom' if len(self.atoms) == 1 else 'Atoms', self.mol_code)
@property
def backbone(self):
"""Returns a new `Residue` containing only the backbone atoms.
Returns
-------
bb_monomer : Residue
`Residue` containing only the backbone atoms of the original
`Monomer`.
Raises
------
KeyError
Raised if the `atoms` dict does not contain all of the
backbone atoms (N, CA, C, O).
"""
try:
backbone = OrderedDict([('N', self.atoms['N']),
('CA', self.atoms['CA']),
('C', self.atoms['C']),
('O', self.atoms['O'])])
except KeyError:
missing_atoms = filter(lambda x: x not in self.atoms.keys(),
('N', 'CA', 'C', 'O')
)
raise KeyError('Error in residue {} {} {}, missing ({}) atoms. '
'`atoms` must be an `OrderedDict` with coordinates '
'defined for the backbone (N, CA, C, O) atoms.'
.format(self.parent.id, self.mol_code,
self.id, ', '.join(missing_atoms)))
bb_monomer = Residue(backbone, self.mol_code, monomer_id=self.id,
insertion_code=self.insertion_code,
is_hetero=self.is_hetero)
return bb_monomer
@property
def unique_id(self):
"""Generates a tuple that uniquely identifies a `Monomer` in an `Assembly`.
Notes
-----
The unique_id will uniquely identify each monomer within a polymer.
If each polymer in an assembly has a distinct id, it will uniquely
identify each monomer within the assembly.
The hetero-flag is defined as in Biopython as a string that is
either a single whitespace in the case of a non-hetero atom,
or 'H_' plus the name of the hetero-residue (e.g. 'H_GLC' in
the case of a glucose molecule), or 'W' in the case of a water
molecule.
For more information, see the Biopython documentation or this
Biopython wiki page:
http://biopython.org/wiki/The_Biopython_Structural_Bioinformatics_FAQ
Returns
-------
unique_id : tuple
unique_id[0] is the polymer_id; unique_id[1] is a triple
of the hetero-flag, the monomer id (residue number) and the
insertion code.
"""
if self.is_hetero:
if self.mol_code == 'HOH':
hetero_flag = 'W'
else:
hetero_flag = 'H_{0}'.format(self.mol_code)
else:
hetero_flag = ' '
return self.parent.id, (hetero_flag, self.id, self.insertion_code)
@property
def side_chain(self):
"""List of the side-chain atoms (R-group).
Notes
-----
Returns empty list for glycine.
Returns
-------
side_chain_atoms: list(`Atoms`)
"""
side_chain_atoms = []
if self.mol_code != 'GLY':
covalent_bond_graph = generate_covalent_bond_graph(
find_covalent_bonds(self))
try:
subgraphs = generate_bond_subgraphs_from_break(
covalent_bond_graph, self['CA'], self['CB'])
if len(subgraphs) == 1:
subgraphs = generate_bond_subgraphs_from_break(
subgraphs[0], self['CD'], self['N'])
if len(subgraphs) == 2:
for g in subgraphs:
if self['CB'] in g:
side_chain_atoms = g.nodes()
break
except Exception:
warning_message = "Malformed PDB for Residue {0}: {1}.".format(
self.id, self)
if 'CB' in self.atoms.keys():
side_chain_atoms.append(self['CB'])
warning_message += " Side-chain is just the CB atom."
else:
warning_message += " Empty side-chain."
warnings.warn(warning_message, MalformedPDBWarning)
return side_chain_atoms
@property
def centroid(self):
"""Calculates the centroid of the residue.
Returns
-------
centroid : numpy.array or None
Returns a 3D coordinate for the residue unless a CB
atom is not available, in which case `None` is
returned.
Notes
-----
Uses the definition of the centroid from Huang *et al* [2]_.
References
----------
.. [2] Huang ES, Subbiah S and Levitt M (1995) Recognizing Native
Folds by the Arrangement of Hydrophobic and Polar Residues, J. Mol.
Biol., **252**, 709-720.
"""
if 'CB' in self.atoms:
cb_unit_vector = unit_vector(
self['CB']._vector - self['CA']._vector)
return self['CA']._vector + (3.0 * cb_unit_vector)
return None
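
# Illustrative sketch: inspecting a single residue's side chain and
# centroid. The PDB path is hypothetical and `ampal.load_pdb` is assumed
# to be the parsing entry point. Not part of the original module.
def _example_residue_geometry(pdb_path):
    """Sketch only: side-chain atoms and centroid of the first residue."""
    import ampal
    residue = ampal.load_pdb(pdb_path)[0][0]  # first chain, first residue
    print('residue:', residue.mol_code)
    print('side-chain atoms:', [a.res_label for a in residue.side_chain])
    print('centroid:', residue.centroid)  # None if the residue has no CB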
__author__ = ('Jack W. Heal, Christopher W. Wood, Gail J. Bartlett, '
'Andrew R. Thomson, Kieran L. Hudson')

# ===== End of src/ampal/protein.py =====
# ===== File: src/ampal/nucleic_acid.py =====
from ampal.base_ampal import Polymer, Monomer
class Polynucleotide(Polymer):
"""`Polymer` type object that represents a `Polynucleotide`.
Parameters
----------
monomers : Nucleotide or [Nucleotide], optional
`Nucleotide` or list containing `Nucleotide` objects to form the
`Polynucleotide`.
polymer_id : str, optional
An ID that the user can use to identify the `Polynucleotide`. This is
used when generating a pdb file using `Polynucleotide().pdb`.
parent : ampal.Assembly, optional
Reference to `Assembly` containing the `Polynucleotide`.
sl : int, optional
The default smoothing level used when calculating the
backbone primitive.
Attributes
----------
id : str
`Polynucleotide` ID
parent : ampal.Assembly or None
Reference to `Assembly` containing the `Polynucleotide`
molecule_type : str
A description of the type of `Polymer` i.e. Protein, DNA etc.
ligands : ampal.LigandGroup
A `LigandGroup` containing all the `Ligands` associated with this
`Polynucleotide` chain.
tags : dict
A dictionary containing information about this AMPAL object.
The tags dictionary is used by AMPAL to cache information
about this object, but is also intended to be used by users
to store any relevant information they have.
sl : int
The default smoothing level used when calculating the
backbone primitive.
Raises
------
TypeError
`Polymer` type objects can only be initialised empty or using
a `Monomer`.
"""
def __init__(self, monomers=None, polymer_id=' ', parent=None, sl=2):
super().__init__(
monomers=monomers, polymer_id=polymer_id,
molecule_type='nucleic_acid', parent=parent, sl=sl)
@property
def sequence(self):
"""Returns the sequence of the `Polynucleotide` as a string.
Returns
-------
sequence : str
String of the monomer sequence of the `Polynucleotide`.
"""
seq = [x.mol_code for x in self._monomers]
return ' '.join(seq)
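
# Illustrative sketch: `Polynucleotide.sequence` joins monomer codes with
# spaces. The atom-less `Nucleotide` objects below are built purely for
# demonstration and would not support geometric operations.
def _example_polynucleotide_sequence():
    """Sketch only: sequence string of a hand-built Polynucleotide."""
    nucleotides = [Nucleotide(mol_code=code) for code in ('DA', 'DT', 'DG')]
    polynucleotide = Polynucleotide(nucleotides, polymer_id='A')
    print(polynucleotide.sequence)  # -> 'DA DT DG'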
class Nucleotide(Monomer):
"""Represents a nucleotide base.
Parameters
----------
atoms : OrderedDict, optional
OrderedDict containing Atoms for the `Nucleotide`. OrderedDict
is used to maintain the order items were added to the
dictionary.
mol_code : str, optional
One or three letter code that represents the `Nucleotide`.
monomer_id : str, optional
String used to identify the `Nucleotide`.
insertion_code : str, optional
Insertion code of `Nucleotide`, used if reading from pdb.
is_hetero : bool, optional
True if is a hetero atom in pdb. Helps with PDB formatting.
parent : ampal.Polynucleotide, optional
Reference to `Polynucleotide` containing the `Nucleotide`.
Attributes
----------
mol_code : str
PDB molecule code that represents the `Nucleotide`.
insertion_code : str
Insertion code of `Nucleotide`, used if reading from pdb.
is_hetero : bool
True if is a hetero atom in pdb. Helps with PDB formatting.
states : dict
Contains an `OrderedDict` of atom information for each
state available for the `Nucleotide`.
id : str
String used to identify the `Nucleotide`.
reference_atom : str
The key that corresponds to the reference `Atom`. This is used
by various functions, for example backbone primitives are
calculated using the `Atom` defined using this key.
parent : Polynucleotide or None
A reference to the `Polynucleotide` containing this `Nucleotide`.
tags : dict
A dictionary containing information about this AMPAL object.
The tags dictionary is used by AMPAL to cache information
about this object, but is also intended to be used by users
to store any relevant information they have.
Raises
------
ValueError
Raised if `mol_code` is not length 1 or 3.
"""
def __init__(
self, atoms=None, mol_code='UNK', monomer_id=' ',
insertion_code=' ', is_hetero=False, parent=None):
super().__init__(atoms, monomer_id, parent=parent)
self.mol_code = mol_code
self.mol_letter = mol_code[-1]
self.insertion_code = insertion_code
self.is_hetero = is_hetero
self.reference_atom = 'P'
__author__ = "Christopher W. Wood" | AMPAL | /AMPAL-1.4.0.tar.gz/AMPAL-1.4.0/src/ampal/nucleic_acid.py | nucleic_acid.py |
from collections import Counter
import warnings
import numpy
from .pseudo_atoms import Primitive
from .geometry import (angle_between_vectors, dihedral, distance,
find_foot, unit_vector, is_acute)
from .amino_acids import (residue_mwt, water_mass, residue_ext_280,
residue_pka, residue_charge, side_chain_dihedrals)
from .ampal_warnings import NoncanonicalWarning
_nc_warning_str = 'Unnatural amino acid detected, this value may be inaccurate.'
def sequence_molecular_weight(seq):
"""Returns the molecular weight of the polypeptide sequence.
Notes
-----
Units = Daltons
Parameters
----------
seq : str
Sequence of amino acids.
"""
if 'X' in seq:
warnings.warn(_nc_warning_str, NoncanonicalWarning)
return sum(
[residue_mwt[aa] * n for aa, n in Counter(seq).items()]) + water_mass
# TODO How to account for cystine vs cysteine
# (when there's a disulphide bond, the extinction coefficient changes to 125).
def sequence_molar_extinction_280(seq):
"""Returns the molar extinction coefficient of the sequence at 280 nm.
Notes
-----
Units = M^-1 cm^-1
Parameters
----------
seq : str
Sequence of amino acids.
"""
if 'X' in seq:
warnings.warn(_nc_warning_str, NoncanonicalWarning)
return sum([residue_ext_280[aa] * n for aa, n in Counter(seq).items()])
def partial_charge(aa, pH):
"""Calculates the partial charge of the amino acid.
Parameters
----------
aa : str
Amino acid single-letter code.
pH : float
pH of interest.
"""
difference = pH - residue_pka[aa]
if residue_charge[aa] > 0:
difference *= -1
ratio = (10 ** difference) / (1 + 10 ** difference)
return ratio
def sequence_charge(seq, pH=7.4):
"""Calculates the total charge of the input polypeptide sequence.
Parameters
----------
seq : str
Sequence of amino acids.
pH : float
pH of interest.
"""
if 'X' in seq:
warnings.warn(_nc_warning_str, NoncanonicalWarning)
adj_protein_charge = sum(
[partial_charge(aa, pH) * residue_charge[aa] * n
for aa, n in Counter(seq).items()])
adj_protein_charge += (
partial_charge('N-term', pH) * residue_charge['N-term'])
adj_protein_charge += (
partial_charge('C-term', pH) * residue_charge['C-term'])
return adj_protein_charge
def charge_series(seq, granularity=0.1):
"""Calculates the charge for pH 1-13.
Parameters
----------
seq : str
Sequence of amino acids.
granularity : float, optional
Granularity of pH values i.e. if 0.1 pH = [1.0, 1.1, 1.2...]
"""
if 'X' in seq:
warnings.warn(_nc_warning_str, NoncanonicalWarning)
ph_range = numpy.arange(1, 13, granularity)
charge_at_ph = [sequence_charge(seq, ph) for ph in ph_range]
return ph_range, charge_at_ph
def sequence_isoelectric_point(seq, granularity=0.1):
"""Calculates the isoelectric point of the sequence for ph 1-13.
Parameters
----------
seq : str
Sequence of amino acids.
granularity : float, optional
Granularity of pH values i.e. if 0.1 pH = [1.0, 1.1, 1.2...]
"""
if 'X' in seq:
warnings.warn(_nc_warning_str, NoncanonicalWarning)
ph_range, charge_at_ph = charge_series(seq, granularity)
abs_charge_at_ph = [abs(ch) for ch in charge_at_ph]
pi_index = min(enumerate(abs_charge_at_ph), key=lambda x: x[1])[0]
return ph_range[pi_index]
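
# Illustrative sketch pulling the sequence-level functions above together
# for a short, arbitrary peptide sequence. Values printed depend on the
# residue data tables imported by this module.
def _example_sequence_properties():
    """Sketch only: bulk properties of a hypothetical peptide."""
    seq = 'ACDEFGHIKLMNPQRSTVWY'
    print('molecular weight (Da): {:.1f}'.format(
        sequence_molecular_weight(seq)))
    print('molar extinction at 280 nm: {}'.format(
        sequence_molar_extinction_280(seq)))
    print('net charge at pH 7.4: {:.2f}'.format(sequence_charge(seq)))
    print('isoelectric point: {:.1f}'.format(
        sequence_isoelectric_point(seq)))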
def measure_sidechain_torsion_angles(residue, verbose=True):
"""Calculates sidechain dihedral angles for a residue
Parameters
----------
residue : [ampal.Residue]
`Residue` object.
verbose : bool, optional
If `True`, prints a message when a residue does not have any
known side-chain dihedral angles to measure.
Returns
-------
chi_angles: [float]
Length depends on residue type, in range [-pi, pi]
[0] = chi1 [if applicable]
[1] = chi2 [if applicable]
[2] = chi3 [if applicable]
[3] = chi4 [if applicable]
"""
chi_angles = []
aa = residue.mol_code
if aa not in side_chain_dihedrals:
if verbose:
print("Amino acid {} has no known side-chain dihedral".format(aa))
else:
for set_atoms in side_chain_dihedrals[aa]:
required_for_dihedral = set_atoms[0:4]
try:
angle = dihedral(
residue[required_for_dihedral[0]]._vector,
residue[required_for_dihedral[1]]._vector,
residue[required_for_dihedral[2]]._vector,
residue[required_for_dihedral[3]]._vector)
chi_angles.append(angle)
except KeyError as k:
print("{0} atom missing from residue {1} {2} "
"- can't assign dihedral".format(
k, residue.mol_code, residue.id))
chi_angles.append(None)
return chi_angles
def measure_torsion_angles(residues):
"""Calculates the dihedral angles for a list of backbone atoms.
Parameters
----------
residues : [ampal.Residue]
List of `Residue` objects.
Returns
-------
torsion_angles : (float, float, float)
One triple for each residue, containing torsion angles in
the range [-pi, pi].
[0] omega
[1] phi
[2] psi
For the first residue, omega and phi are not defined. For
the final residue, psi is not defined.
Notes
-----
If fewer than two residues are supplied, no dihedral angles can be
calculated, and each triple in the returned list is (None, None, None).
"""
if len(residues) < 2:
torsion_angles = [(None, None, None)] * len(residues)
else:
torsion_angles = []
for i in range(len(residues)):
if i == 0:
res1 = residues[i]
res2 = residues[i + 1]
omega = None
phi = None
try:
psi = dihedral(
res1['N']._vector, res1['CA']._vector,
res1['C']._vector, res2['N']._vector)
except KeyError as k:
print("{0} atom missing - can't assign psi".format(k))
psi = None
torsion_angles.append((omega, phi, psi))
elif i == len(residues) - 1:
res1 = residues[i - 1]
res2 = residues[i]
try:
omega = dihedral(
res1['CA']._vector, res1['C']._vector,
res2['N']._vector, res2['CA']._vector)
except KeyError as k:
print("{0} atom missing - can't assign omega".format(k))
omega = None
try:
phi = dihedral(
res1['C']._vector, res2['N']._vector,
res2['CA']._vector, res2['C']._vector)
except KeyError as k:
print("{0} atom missing - can't assign phi".format(k))
phi = None
psi = None
torsion_angles.append((omega, phi, psi))
else:
res1 = residues[i - 1]
res2 = residues[i]
res3 = residues[i + 1]
try:
omega = dihedral(
res1['CA']._vector, res1['C']._vector,
res2['N']._vector, res2['CA']._vector)
except KeyError as k:
print("{0} atom missing - can't assign omega".format(k))
omega = None
try:
phi = dihedral(
res1['C']._vector, res2['N']._vector,
res2['CA']._vector, res2['C']._vector)
except KeyError as k:
print("{0} atom missing - can't assign phi".format(k))
phi = None
try:
psi = dihedral(
res2['N']._vector, res2['CA']._vector,
res2['C']._vector, res3['N']._vector)
except KeyError as k:
print("{0} atom missing - can't assign psi".format(k))
psi = None
torsion_angles.append((omega, phi, psi))
return torsion_angles
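
# Illustrative sketch: pairing each residue of a chain with its backbone
# torsion angles. The PDB path is hypothetical and `ampal.load_pdb` is
# assumed to be the parsing entry point; angle units are whatever
# `dihedral` returns.
def _example_backbone_torsions(pdb_path):
    """Sketch only: print omega, phi and psi for every residue."""
    import ampal
    chain = ampal.load_pdb(pdb_path)[0]
    for residue, (omega, phi, psi) in zip(
            chain, measure_torsion_angles(chain)):
        print(residue.id, residue.mol_code, omega, phi, psi)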
# TODO: Find a home for this
def cc_to_local_params(pitch, radius, oligo):
"""Returns local parameters for an oligomeric assembly.
Parameters
----------
pitch : float
Pitch of assembly
radius : float
Radius of assembly
oligo : int
Oligomeric state of assembly
Returns
-------
pitchloc : float
Local pitch of assembly (between 2 adjacent component helices)
rloc : float
Local radius of assembly
alphaloc : float
Local pitch-angle of assembly
"""
rloc = numpy.sin(numpy.pi / oligo) * radius
alpha = numpy.arctan((2 * numpy.pi * radius) / pitch)
alphaloc = numpy.cos((numpy.pi / 2) - ((numpy.pi) / oligo)) * alpha
pitchloc = (2 * numpy.pi * rloc) / numpy.tan(alphaloc)
return pitchloc, rloc, numpy.rad2deg(alphaloc)
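
# Worked example for cc_to_local_params: a hypothetical parallel dimer.
# The pitch and radius are illustrative, textbook-scale values, not
# measurements from a real structure.
def _example_cc_to_local_params():
    """Sketch only: local parameters of an assumed coiled-coil dimer."""
    pitchloc, rloc, alphaloc = cc_to_local_params(
        pitch=148.0, radius=4.9, oligo=2)
    print('local pitch: {:.1f}'.format(pitchloc))
    print('local radius: {:.2f}'.format(rloc))
    print('local pitch angle (degrees): {:.1f}'.format(alphaloc))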
def residues_per_turn(p):
""" The number of residues per turn at each Monomer in the Polymer.
Notes
-----
Each element of the returned list is the number of residues
per turn, at a point on the Polymer primitive. Calculated using
the relative positions of the CA atoms and the primitive of the
Polymer. Element i is calculated from the dihedral angle using
the CA atoms of the Monomers with indices [i, i+1] and the
corresponding atoms of the primitive. The final value is None.
Parameters
----------
p : ampal.Polypeptide
`Polypeptide` from which residues per turn will be calculated.
Returns
-------
rpts : [float]
Residue per turn values.
"""
cas = p.get_reference_coords()
prim_cas = p.primitive.coordinates
dhs = [abs(dihedral(cas[i], prim_cas[i], prim_cas[i + 1], cas[i + 1]))
for i in range(len(prim_cas) - 1)]
rpts = [360.0 / dh for dh in dhs]
rpts.append(None)
return rpts
def polymer_to_reference_axis_distances(p, reference_axis, tag=True, reference_axis_name='ref_axis'):
"""Returns distances between the primitive of a Polymer and a reference_axis.
Notes
-----
Distances are calculated between each point of the Polymer primitive
and the corresponding point in reference_axis. In the special case
of the helical barrel, if the Polymer is a helix and the reference_axis
represents the centre of the barrel, then this function returns the
radius of the barrel at each point on the helix primitive. The points
of the primitive and the reference_axis are run through in the same
order, so take care with the relative orientation of the reference
axis when defining it.
Parameters
----------
p : ampal.Polymer
reference_axis : list(numpy.array or tuple or list)
Length of reference_axis must equal length of the Polymer.
Each element of reference_axis represents a point in R^3.
tag : bool, optional
If True, tags the Chain with the reference axis coordinates
and each Residue with its distance to the ref axis.
Distances are stored at the Residue level, but refer to
distances from the CA atom.
reference_axis_name : str, optional
Used to name the keys in tags at Chain and Residue level.
Returns
-------
distances : list(float)
Distance values between corresponding points on the
reference axis and the `Polymer` `Primitive`.
Raises
------
ValueError
If the Polymer and the reference_axis have unequal length.
"""
if not len(p) == len(reference_axis):
raise ValueError(
"The reference axis must contain the same number of points "
"as the Polymer primitive.")
prim_cas = p.primitive.coordinates
ref_points = reference_axis.coordinates
distances = [distance(prim_cas[i], ref_points[i])
for i in range(len(prim_cas))]
if tag:
p.tags[reference_axis_name] = reference_axis
monomer_tag_name = 'distance_to_{0}'.format(reference_axis_name)
for m, d in zip(p._monomers, distances):
m.tags[monomer_tag_name] = d
return distances
def crick_angles(p, reference_axis, tag=True, reference_axis_name='ref_axis'):
"""Returns the Crick angle for each CA atom in the `Polymer`.
Notes
-----
The final value in the returned list is `None`, since the angle
calculation requires pairs of points on both the primitive and
reference_axis.
Parameters
----------
p : ampal.Polymer
Reference `Polymer`.
reference_axis : list(numpy.array or tuple or list)
Length of reference_axis must equal length of the Polymer.
Each element of reference_axis represents a point in R^3.
tag : bool, optional
If `True`, tags the `Polymer` with the reference axis coordinates
and each Residue with its Crick angle. Crick angles are stored
at the Residue level, but are calculated using the CA atom.
reference_axis_name : str, optional
Used to name the keys in tags at Chain and Residue level.
Returns
-------
cr_angles : list(float)
The Crick angles in degrees for each CA atom of the Polymer.
Raises
------
ValueError
If the Polymer and the reference_axis have unequal length.
"""
if not len(p) == len(reference_axis):
raise ValueError(
"The reference axis must contain the same number of points"
" as the Polymer primitive.")
prim_cas = p.primitive.coordinates
p_cas = p.get_reference_coords()
ref_points = reference_axis.coordinates
cr_angles = [
dihedral(ref_points[i], prim_cas[i], prim_cas[i + 1], p_cas[i])
for i in range(len(prim_cas) - 1)]
cr_angles.append(None)
if tag:
p.tags[reference_axis_name] = reference_axis
monomer_tag_name = 'crick_angle_{0}'.format(reference_axis_name)
for m, c in zip(p._monomers, cr_angles):
m.tags[monomer_tag_name] = c
return cr_angles
def alpha_angles(p, reference_axis, tag=True, reference_axis_name='ref_axis'):
"""Alpha angle calculated using points on the primitive of helix and axis.
Notes
-----
The final value is None, since the angle calculation requires pairs
of points along the primitive and axis. This is a generalisation
of the calculation used to measure the tilt of a helix in a
coiled-coil with respect to the central axis of the coiled coil.
Parameters
----------
p : ampal.Polymer
Reference `Polymer`.
reference_axis : list(numpy.array or tuple or list)
Length of reference_axis must equal length of the Polymer.
Each element of reference_axis represents a point in R^3.
tag : bool, optional
If `True`, tags the Chain with the reference axis coordinates
and each Residue with its alpha angle. Alpha angles are stored
at the Residue level, but are calculated using the CA atom.
reference_axis_name : str, optional
Used to name the keys in tags at Chain and Residue level.
Returns
-------
alphas : list of float
The alpha angle for the Polymer at each point of its primitive,
in degrees.
Raises
------
ValueError
If the Polymer and the reference_axis have unequal length.
"""
if not len(p) == len(reference_axis):
raise ValueError(
"The reference axis must contain the same number of points "
"as the Polymer primitive.")
prim_cas = p.primitive.coordinates
ref_points = reference_axis.coordinates
alphas = [abs(dihedral(ref_points[i + 1], ref_points[i], prim_cas[i], prim_cas[i + 1]))
for i in range(len(prim_cas) - 1)]
alphas.append(None)
if tag:
p.tags[reference_axis_name] = reference_axis
monomer_tag_name = 'alpha_angle_{0}'.format(reference_axis_name)
for m, a in zip(p._monomers, alphas):
m.tags[monomer_tag_name] = a
return alphas
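
# Illustrative sketch: a reference-axis workflow for a parallel coiled
# coil with equal-length chains. The PDB path is hypothetical,
# `ampal.load_pdb` is assumed, and `reference_axis_from_chains` is
# defined later in this module (resolved at call time).
def _example_coiled_coil_radii(pdb_path):
    """Sketch only: mean distance of each chain from the shared axis."""
    import ampal
    assembly = ampal.load_pdb(pdb_path)
    chains = [p for p in assembly if p.molecule_type == 'protein']
    ref_axis = reference_axis_from_chains(chains)
    for chain in chains:
        radii = polymer_to_reference_axis_distances(chain, ref_axis)
        print('{}: mean radius {:.2f}'.format(
            chain.id, sum(radii) / len(radii)))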
def polypeptide_vector(p, start_index=0, end_index=-1, unit=True):
"""Vector along the Chain primitive (default is from N-terminus to C-terminus).
Notes
-----
`start_index` and `end_index` can be changed to examine smaller
sections of the Chain, or reversed to change the direction of
the vector.
Parameters
----------
p : ampal.Polymer
Reference `Polymer`.
start_index : int, optional
Default is 0 (start at the N-terminus of the Chain)
end_index : int, optional
Default is -1 (end at the C-terminus of the Chain)
unit : bool
If True, the vector returned has a magnitude of 1.
Returns
-------
vector : numpy.array
The vector has shape (3,).
"""
if len(p) <= 1:
raise ValueError(
"Polymer should have length greater than 1. Polymer length = {0}".format(len(p)))
try:
prim_cas = p.primitive.coordinates
direction_vector = prim_cas[end_index] - prim_cas[start_index]
except ValueError:
direction_vector = p[end_index]['CA'].array - \
p[start_index]['CA'].array
if unit:
direction_vector = unit_vector(direction_vector)
return direction_vector
# TODO Change functionality so that a Primitive object is returned.
# (e.g. all CA ALA atoms like with primitives).
def reference_axis_from_chains(chains):
"""Average coordinates from a set of primitives calculated from Chains.
Parameters
----------
chains : list(Chain)
Returns
-------
reference_axis : numpy.array
The averaged (x, y, z) coordinates of the primitives for
the list of Chains. In the case of a coiled coil barrel,
this would give the central axis for calculating e.g. Crick
angles.
Raises
------
ValueError :
If the Chains are not all of the same length.
"""
if not len(set([len(x) for x in chains])) == 1:
raise ValueError("All chains must be of the same length")
# First array in coords is the primitive coordinates of the first chain.
# The orientation of the first chain orients the reference_axis.
coords = [numpy.array(chains[0].primitive.coordinates)]
orient_vector = polypeptide_vector(chains[0])
# Append the coordinates for the remaining chains, reversing the
# direction in antiparallel arrangements.
for i, c in enumerate(chains[1:]):
if is_acute(polypeptide_vector(c), orient_vector):
coords.append(numpy.array(c.primitive.coordinates))
else:
coords.append(numpy.flipud(numpy.array(c.primitive.coordinates)))
# Average across the x, y and z coordinates to get the reference_axis
# coordinates
reference_axis = numpy.mean(numpy.array(coords), axis=0)
return Primitive.from_coordinates(reference_axis)
def flip_reference_axis_if_antiparallel(
p, reference_axis, start_index=0, end_index=-1):
"""Flips reference axis if direction opposes the direction of the `Polymer`.
Notes
-----
If the angle between the vector for the Polymer and the vector
for the reference_axis is > 90 degrees, then the reference axis
is reversed. This is useful to run before running
polymer_to_reference_axis_distances, crick_angles, or alpha_angles.
For more information on the start and end indices, see polypeptide_vector.
Parameters
----------
p : ampal.Polymer
Reference `Polymer`.
reference_axis : list(numpy.array or tuple or list)
Length of reference_axis must equal length of the Polymer.
Each element of reference_axis represents a point in R^3.
start_index : int, optional
Default is 0 (start at the N-terminus of the Polymer)
end_index : int, optional
Default is -1 (end at the C-terminus of the Polymer)
Returns
-------
reference_axis : list(numpy.array or tuple or list)
"""
p_vector = polypeptide_vector(
p, start_index=start_index, end_index=end_index)
if is_acute(p_vector,
reference_axis[end_index] - reference_axis[start_index]):
reference_axis = numpy.flipud(reference_axis)
return reference_axis
def make_primitive(cas_coords, window_length=3):
"""Calculates running average of cas_coords with a fixed averaging window_length.
Parameters
----------
cas_coords : list(numpy.array or float or tuple)
Each element of the list must have length 3.
window_length : int, optional
The number of coordinate sets to average each time.
Returns
-------
s_primitive : list(numpy.array)
Each array has length 3.
Raises
------
ValueError
If the length of cas_coords is smaller than the window_length.
"""
if len(cas_coords) >= window_length:
primitive = []
count = 0
for _ in cas_coords[:-(window_length - 1)]:
group = cas_coords[count:count + window_length]
average_x = sum([x[0] for x in group]) / window_length
average_y = sum([y[1] for y in group]) / window_length
average_z = sum([z[2] for z in group]) / window_length
primitive.append(numpy.array([average_x, average_y, average_z]))
count += 1
else:
raise ValueError(
'A primitive cannot be generated for {0} atoms using a (too large) '
'averaging window_length of {1}.'.format(
len(cas_coords), window_length))
return primitive
def make_primitive_smoothed(cas_coords, smoothing_level=2):
""" Generates smoothed primitive from a list of coordinates.
Parameters
----------
cas_coords : list(numpy.array or float or tuple)
Each element of the list must have length 3.
smoothing_level : int, optional
Number of times to run the averaging.
Returns
-------
s_primitive : list(numpy.array)
Each array has length 3.
Raises
------
ValueError
If the smoothing level is too great compared to the length
of cas_coords.
"""
try:
s_primitive = make_primitive(cas_coords)
for x in range(smoothing_level):
s_primitive = make_primitive(s_primitive)
except ValueError:
raise ValueError(
'Smoothing level {0} too high, try reducing the number of rounds'
' or give a longer Chain (current length = {1}).'.format(
smoothing_level, len(cas_coords)))
return s_primitive
def make_primitive_extrapolate_ends(cas_coords, smoothing_level=2):
"""Generates smoothed helix primitives and extrapolates lost ends.
Notes
-----
From an input list of CA coordinates, the running average is
calculated to form a primitive. The smoothing_level dictates how
many times to calculate the running average. A higher
smoothing_level generates a 'smoother' primitive - i.e. the
points on the primitive more closely fit a smooth curve in R^3.
Each time the smoothing level is increased by 1, a point is lost
from either end of the primitive. To correct for this, the primitive
is extrapolated at the ends to approximate the lost values. There
is a trade-off then between the smoothness of the primitive and
its accuracy at the ends.
Parameters
----------
cas_coords : list(numpy.array or float or tuple)
Each element of the list must have length 3.
smoothing_level : int
Number of times to run the averaging.
Returns
-------
final_primitive : list(numpy.array)
Each array has length 3.
"""
try:
smoothed_primitive = make_primitive_smoothed(
cas_coords, smoothing_level=smoothing_level)
except ValueError:
smoothed_primitive = make_primitive_smoothed(
cas_coords, smoothing_level=smoothing_level - 1)
# if returned smoothed primitive is too short, lower the smoothing
# level and try again.
if len(smoothed_primitive) < 3:
smoothed_primitive = make_primitive_smoothed(
cas_coords, smoothing_level=smoothing_level - 1)
final_primitive = []
for ca in cas_coords:
prim_dists = [distance(ca, p) for p in smoothed_primitive]
closest_indices = sorted([x[0] for x in sorted(
enumerate(prim_dists), key=lambda k: k[1])[:3]])
a, b, c = [smoothed_primitive[x] for x in closest_indices]
ab_foot = find_foot(a, b, ca)
bc_foot = find_foot(b, c, ca)
ca_foot = (ab_foot + bc_foot) / 2
final_primitive.append(ca_foot)
return final_primitive
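
# Illustrative sketch: deriving a primitive from synthetic, roughly
# alpha-helical CA coordinates (about 100 degrees of turn and 1.5 units
# of rise per residue; the numbers only generate plausible test data).
def _example_primitive_from_synthetic_helix():
    """Sketch only: smoothed primitive with extrapolated ends."""
    cas = [numpy.array([2.3 * numpy.cos(numpy.deg2rad(100 * i)),
                        2.3 * numpy.sin(numpy.deg2rad(100 * i)),
                        1.5 * i])
           for i in range(12)]
    primitive = make_primitive_extrapolate_ends(cas, smoothing_level=2)
    assert len(primitive) == len(cas)  # one point per input coordinate
    print('first primitive point:', primitive[0])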
__author__ = ('Jack W. Heal, Christopher W. Wood, Gail J. Bartlett, '
'Derek N. Woolfson, Kieran L. Hudson')

# ===== End of src/ampal/analyse_protein.py =====
# ===== File: src/ampal/naccess.py =====
import subprocess
import tempfile
import os
def naccess_available():
"""True if naccess is available on the path."""
available = False
try:
subprocess.check_output(['naccess'], stderr=subprocess.DEVNULL)
except subprocess.CalledProcessError:
# naccess exits non-zero when run without arguments, so a
# CalledProcessError still means the binary was found and run.
available = True
except FileNotFoundError:
print("naccess has not been found on your path. If you have already "
"installed naccess but are unsure how to add it to your path, "
"check out this: https://stackoverflow.com/a/14638025")
return available
def run_naccess(pdb, mode, path=True, include_hetatms=False, outfile=None,
path_to_ex=None):
"""Uses naccess to run surface accessibility calculations.
Notes
-----
Requires the naccess program, with a path to its executable
provided in global_settings. For information on the Naccess program,
see: http://www.bioinf.manchester.ac.uk/naccess/
This includes information on the licensing, which is not free for
Industrial and Profit-making instituions.
Parameters
----------
pdb : str
Path to pdb file or string.
mode : str
Return mode of naccess. One of 'asa', 'rsa' or 'log'.
path : bool, optional
Indicates if pdb is a path or a string.
include_hetatms : bool, optional
If True, HETATM records are included in the accessibility
calculation.
outfile : str, optional
Filepath for storing the naccess output.
path_to_ex : str or None
Path to the binary for naccess, if none then it is assumed
that the binary is available on the path as `naccess`.
Returns
-------
naccess_out : str
naccess output file for given mode as a string.
"""
if mode not in ['asa', 'rsa', 'log']:
raise ValueError(
"mode {} not valid. Must be \'asa\', \'rsa\' or \'log\'"
.format(mode))
if path_to_ex:
naccess_exe = path_to_ex
else:
naccess_exe = 'naccess'
if not path:
if type(pdb) == str:
pdb = pdb.encode()
else:
with open(pdb, 'r') as inf:
pdb = inf.read().encode()
this_dir = os.getcwd()
# temp pdb file in temp dir.
temp_dir = tempfile.TemporaryDirectory()
temp_pdb = tempfile.NamedTemporaryFile(dir=temp_dir.name)
temp_pdb.write(pdb)
temp_pdb.seek(0)
# run naccess in the temp_dir. Files created will be written here.
os.chdir(temp_dir.name)
if include_hetatms:
naccess_args = '-h'
subprocess.check_output([naccess_exe, naccess_args, temp_pdb.name])
else:
subprocess.check_output([naccess_exe, temp_pdb.name])
temp_pdb.close()
with open('.{}'.format(mode), 'r') as inf:
naccess_out = inf.read()
# navigate back to initial directory and clean up.
os.chdir(this_dir)
if outfile:
with open(outfile, 'w') as inf:
inf.write(naccess_out)
temp_dir.cleanup()
return naccess_out
def total_accessibility(in_rsa, path=True):
"""Parses rsa file for the total surface accessibility data.
Parameters
----------
in_rsa : str
Path to naccess rsa file.
path : bool
Indicates if in_rsa is a path or a string.
Returns
-------
accessibility_values : 5-tuple(float)
Total accessibility values for:
[0] all atoms
[1] all side-chain atoms
[2] all main-chain atoms
[3] all non-polar atoms
[4] all polar atoms
"""
if path:
with open(in_rsa, 'r') as inf:
rsa = inf.read()
else:
rsa = in_rsa[:]
all_atoms, side_chains, main_chain, non_polar, polar = [
float(x) for x in rsa.splitlines()[-1].split()[1:]]
return all_atoms, side_chains, main_chain, non_polar, polar
def extract_residue_accessibility(in_rsa, path=True, get_total=False):
"""Parses rsa file for solvent accessibility for each residue.
Parameters
----------
in_rsa : str
Path to naccess rsa file
path : bool
Indicates if in_rsa is a path or a string
get_total : bool
Indicates if the total accessibility from the file needs to
be extracted. Convenience method for running the
total_accessibility function but only running NACCESS once
Returns
-------
rel_solv_acc_all_atoms : list
Relative solvent accessibility of all atoms in each amino acid.
all_atoms : float or None
Total relative solvent accessibility of all atoms in the NACCESS
rsa file. Only returned if `get_total` is True, otherwise None.
"""
if path:
with open(in_rsa, 'r') as inf:
rsa = inf.read()
else:
rsa = in_rsa[:]
residue_list = rsa.splitlines()
rel_solv_acc_all_atoms = [
float(x[22:28])
for x in residue_list
if x[0:3] == "RES" or x[0:3] == "HEM"]
if get_total:
(all_atoms, _, _, _, _) = total_accessibility(
rsa, path=False)
return rel_solv_acc_all_atoms, all_atoms
return rel_solv_acc_all_atoms, None
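
# Illustrative sketch of the full workflow in this module: check that the
# binary is available, run naccess in 'rsa' mode on a local PDB file
# (hypothetical path), then parse per-residue and total accessibility
# from the returned string rather than from a file on disk.
def _example_naccess_workflow(pdb_path):
    """Sketch only: requires the naccess binary on the path."""
    if not naccess_available():
        print('naccess not found; skipping example.')
        return
    rsa = run_naccess(pdb_path, mode='rsa', path=True)
    per_residue, total = extract_residue_accessibility(
        rsa, path=False, get_total=True)
    print('total relative accessibility (all atoms): {}'.format(total))
    print('first five residues: {}'.format(per_residue[:5]))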
__author__ = 'Jack W. Heal, Gail J. Bartlett'

# ===== End of src/ampal/naccess.py =====
# ===== File: src/ampal/base_ampal.py =====
from collections import OrderedDict
import itertools
import numpy
from .data import ELEMENT_DATA, PDB_ATOM_COLUMNS
from .geometry import distance, Quaternion, centre_of_mass, rmsd
def cap(v, l):
"""Shortens string is above certain length."""
s = str(v)
return s if len(s) <= l else s[-l:]
def find_atoms_within_distance(atoms, cutoff_distance, point):
"""Returns atoms within the distance from the point.
Parameters
----------
atoms : [ampal.atom]
A list of `ampal.atoms`.
cutoff_distance : float
Maximum distance from point.
point : (float, float, float)
Reference point, 3D coordinate.
Returns
-------
filtered_atoms : [ampal.atoms]
`atoms` list filtered by distance.
"""
return [x for x in atoms if distance(x, point) <= cutoff_distance]
def centre_of_atoms(atoms, mass_weighted=True):
""" Returns centre point of any list of atoms.
Parameters
----------
atoms : list
List of AMPAL atom objects.
mass_weighted : bool, optional
If True returns centre of mass, otherwise just geometric centre of points.
Returns
-------
centre_of_mass : numpy.array
3D coordinate for the centre of mass.
"""
points = [x._vector for x in atoms]
if mass_weighted:
masses = [x.mass for x in atoms]
else:
masses = []
return centre_of_mass(points=points, masses=masses)
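
# Illustrative sketch: the geometric centre of two atoms two units apart
# along x. Mass weighting is skipped because it assumes `Atom` exposes a
# `mass` attribute; the sketch also assumes `centre_of_mass` falls back
# to an unweighted mean when `masses` is empty, as the flag implies.
def _example_centre_of_atoms():
    """Sketch only: unweighted centre of two hand-made atoms."""
    atoms = [Atom((0.0, 0.0, 0.0), element='H'),
             Atom((2.0, 0.0, 0.0), element='O')]
    print(centre_of_atoms(atoms, mass_weighted=False))  # -> [1. 0. 0.]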
def write_pdb(residues, chain_id=' ', alt_states=False, strip_states=False):
"""Writes a pdb file for a list of residues.
Parameters
----------
residues : list
List of Residue objects.
chain_id : str
String of the chain id, defaults to ' '.
alt_states : bool, optional
If true, include all occupancy states of residues, else outputs primary state only.
strip_states : bool, optional
If true, remove all state labels from residues. Only use with alt_states false.
Returns
-------
pdb_str : str
String of the PDB file.
"""
pdb_atom_col_dict = PDB_ATOM_COLUMNS
out_pdb = []
if len(str(chain_id)) > 1:
poly_id = ' '
else:
poly_id = str(chain_id)
for monomer in residues:
if (len(monomer.states) > 1) and alt_states and not strip_states:
atom_list = itertools.chain(
*[x[1].items() for x in sorted(monomer.states.items())])
else:
atom_list = monomer.atoms.items()
if 'chain_id' in monomer.tags:
poly_id = monomer.tags['chain_id']
for atom_t, atom in atom_list:
if strip_states:
state_label = ' '
elif (atom.tags['state'] == 'A') and (len(monomer.states) == 1):
state_label = ' '
else:
state_label = atom.tags['state']
atom_data = {
'atom_number': '{:>5}'.format(cap(atom.id, 5)),
'atom_name': '{:<4}'.format(cap(pdb_atom_col_dict[atom_t], 4)),
'alt_loc_ind': '{:<1}'.format(cap(state_label, 1)),
'residue_type': '{:<3}'.format(cap(monomer.mol_code, 3)),
'chain_id': '{:<1}'.format(cap(poly_id, 1)),
'res_num': '{:>4}'.format(cap(monomer.id, 4)),
'icode': '{:<1}'.format(cap(monomer.insertion_code, 1)),
'coord_str': '{0:>8.3f}{1:>8.3f}{2:>8.3f}'.format(
*[x for x in atom]),
'occupancy': '{:>6.2f}'.format(atom.tags['occupancy']),
'temp_factor': '{:>6.2f}'.format(atom.tags['bfactor']),
'element': '{:>2}'.format(cap(atom.element, 2)),
'charge': '{:<2}'.format(cap(atom.tags['charge'], 2))
}
if monomer.is_hetero:
pdb_line_template = (
'HETATM{atom_number} {atom_name}{alt_loc_ind}{residue_type}'
' {chain_id}{res_num}{icode} {coord_str}{occupancy}'
'{temp_factor} {element}{charge}\n'
)
else:
pdb_line_template = (
'ATOM {atom_number} {atom_name}{alt_loc_ind}{residue_type}'
' {chain_id}{res_num}{icode} {coord_str}{occupancy}'
'{temp_factor} {element}{charge}\n'
)
out_pdb.append(pdb_line_template.format(**atom_data))
return ''.join(out_pdb)
class BaseAmpal(object):
"""Base class for all AMPAL objects except `ampal.atom`.
Raises
------
NotImplementedError
`BaseAmpal` is an abstract base class and is not intended to
be instantiated. A `NotImplementedError` is raised if a
method is called that is required on a child class but is
not implemented in `BaseAmpal`.
"""
@property
def pdb(self):
"""Runs make_pdb in default mode."""
return self.make_pdb()
@property
def centre_of_mass(self):
"""Returns the centre of mass of AMPAL object.
Notes
-----
All atoms are included in calculation, call `centre_of_mass`
manually if another selection is required.
Returns
-------
centre_of_mass : numpy.array
3D coordinate for the centre of mass.
"""
elts = set([x.element for x in self.get_atoms()])
masses_dict = {e: ELEMENT_DATA[e]['atomic mass'] for e in elts}
points = [x._vector for x in self.get_atoms()]
masses = [masses_dict[x.element] for x in self.get_atoms()]
return centre_of_mass(points=points, masses=masses)
def is_within(self, cutoff_dist, point):
"""Returns all atoms in ampal object within `cut-off` distance from the `point`."""
return find_atoms_within_distance(self.get_atoms(), cutoff_dist, point)
def get_atoms(self, ligands=True, inc_alt_states=False):
raise NotImplementedError
def make_pdb(self):
raise NotImplementedError
def rotate(self, angle, axis, point=None, radians=False, inc_alt_states=True):
"""Rotates every atom in the AMPAL object.
Parameters
----------
angle : float
Angle that AMPAL object will be rotated.
axis : 3D Vector (tuple, list, numpy.array)
Axis about which the AMPAL object will be rotated.
point : 3D Vector (tuple, list, numpy.array), optional
Point that the axis lies upon. If `None` then the origin is used.
radians : bool, optional
True if `angle` is defined in radians, False if in degrees.
inc_alt_states : bool, optional
If true, will rotate atoms in all states i.e. includes
alternate conformations for sidechains.
"""
q = Quaternion.angle_and_axis(angle=angle, axis=axis, radians=radians)
for atom in self.get_atoms(inc_alt_states=inc_alt_states):
atom._vector = q.rotate_vector(v=atom._vector, point=point)
return
def translate(self, vector, inc_alt_states=True):
"""Translates every atom in the AMPAL object.
Parameters
----------
vector : 3D Vector (tuple, list, numpy.array)
Vector used for translation.
inc_alt_states : bool, optional
If true, will rotate atoms in all states i.e. includes
alternate conformations for sidechains.
"""
vector = numpy.array(vector)
for atom in self.get_atoms(inc_alt_states=inc_alt_states):
atom._vector += vector
return
def rmsd(self, other, backbone=False):
"""Calculates the RMSD between two AMPAL objects.
Notes
-----
No fitting operation is performed, and both AMPAL objects must
have the same number of atoms.
Parameters
----------
other : AMPAL Object
Any AMPAL object with `get_atoms` method.
backbone : bool, optional
Calculates RMSD of backbone only.
"""
assert type(self) is type(other)
if backbone and hasattr(self, 'backbone'):
points1 = self.backbone.get_atoms()
points2 = other.backbone.get_atoms()
else:
points1 = self.get_atoms()
points2 = other.get_atoms()
points1 = [x._vector for x in points1]
points2 = [x._vector for x in points2]
return rmsd(points1=points1, points2=points2)
class Polymer(BaseAmpal):
"""A container that holds `Monomer` type objects.
Notes
-----
`Polymer` has a simple hierarchy: A `Polymer` contains one or
more `Monomer`.
Parameters
----------
monomers : Monomer or [Monomer], optional
Monomer or list containing Monomer objects to form the Polymer().
ligands : LigandGroup, optional
`Ligands` associated with the `Polymer`.
polymer_id : str, optional
An ID that the user can use to identify the `Polymer`. This is
used when generating a pdb file using `Polymer().pdb`.
molecule_type : str, optional
A description of the type of `Polymer` i.e. Protein, DNA etc.
parent : ampal.Assembly, optional
Reference to `Assembly` containing the `Polymer`.
sl : int, optional
The default smoothing level used when calculating the
backbone primitive.
Attributes
----------
id : str
Polymer ID
parent : ampal.Assembly or None
Reference to `Assembly` containing the `Polymer`.
molecule_type : str
A description of the type of `Polymer` i.e. Protein, DNA etc.
ligands : ampal.LigandGroup
A `LigandGroup` containing all the `Ligands` associated with this
`Polymer` chain.
tags : dict
A dictionary containing information about this AMPAL object.
The tags dictionary is used by AMPAL to cache information
about this object, but is also intended to be used by users
to store any relevant information they have.
sl : int
The default smoothing level used when calculating the
backbone primitive.
Raises
------
TypeError
Polymer objects can only be initialised empty, using a Monomer
or a list of Monomers.
"""
def __init__(self, monomers=None, ligands=None, polymer_id=' ',
molecule_type='', parent=None, sl=2):
if monomers:
if isinstance(monomers, Monomer):
self._monomers = [monomers]
elif isinstance(monomers, list) and isinstance(monomers[0], Monomer):
self._monomers = list(monomers)
else:
raise TypeError(
'Polymer objects can only be initialised empty, '
'using a Monomer or a list of Monomers.')
else:
self._monomers = []
self.id = str(polymer_id)
self.parent = parent
self.molecule_type = molecule_type
self.ligands = ligands
self.tags = {}
self.sl = sl
def __add__(self, other):
if isinstance(other, Polymer):
merged_polymer = self._monomers + other._monomers
else:
raise TypeError(
'Only Polymer objects may be merged with a Polymer.')
return Polymer(monomers=merged_polymer, polymer_id=self.id)
def __len__(self):
return len(self._monomers)
def __getitem__(self, item):
if isinstance(item, str):
id_dict = {str(m.id): m for m in self._monomers}
return id_dict[item]
elif isinstance(item, int):
return self._monomers[item]
return Polymer(self._monomers[item], polymer_id=self.id)
def __repr__(self):
return '<Polymer containing {} {}>'.format(
len(self._monomers), 'Monomer' if len(self._monomers) == 1 else 'Monomers')
def append(self, item):
"""Appends a `Monomer to the `Polymer`.
Notes
-----
Does not update labelling.
"""
if isinstance(item, Monomer):
self._monomers.append(item)
else:
raise TypeError(
'Only Monomer objects can be appended to an Polymer.')
return
def extend(self, polymer):
"""Extends the `Polymer` with the contents of another `Polymer`.
Notes
-----
Does not update labelling.
"""
if isinstance(polymer, Polymer):
self._monomers.extend(polymer)
else:
raise TypeError(
'Only Polymer objects may be used to extend a Polymer.')
return
def get_monomers(self, ligands=True):
"""Retrieves all the `Monomers` from the AMPAL object.
Parameters
----------
ligands : bool, optional
If true, will include ligand `Monomers`.
"""
if ligands and self.ligands:
monomers = self._monomers + self.ligands._monomers
else:
monomers = self._monomers
return iter(monomers)
def get_atoms(self, ligands=True, inc_alt_states=False):
"""Flat list of all the Atoms in the Polymer.
Parameters
----------
inc_alt_states : bool
If true atoms from alternate conformations are included rather
than only the "active" states.
Returns
-------
atoms : itertools.chain
Returns an iterator of all the atoms. Convert to list if you
require indexing.
"""
if ligands and self.ligands:
monomers = self._monomers + self.ligands._monomers
else:
monomers = self._monomers
atoms = itertools.chain(
*(list(m.get_atoms(inc_alt_states=inc_alt_states)) for m in monomers))
return atoms
def relabel_monomers(self, labels=None):
"""Relabels the either in numerically or using a list of labels.
Parameters
----------
labels : list, optional
A list of new labels.
Raises
------
ValueError
Raised if the number of labels does not match the number of
component Monomer objects.
"""
if labels:
if len(self._monomers) == len(labels):
for monomer, label in zip(self._monomers, labels):
monomer.id = str(label)
else:
error_string = (
'Number of Monomers ({}) and number of labels '
'({}) must be equal.')
raise ValueError(error_string.format(
len(self._monomers), len(labels)))
else:
for i, monomer in enumerate(self._monomers):
monomer.id = str(i + 1)
return
def relabel_atoms(self, start=1):
"""Relabels all `Atoms` in numerical order.
Parameters
----------
start : int, optional
Offset the labelling by `start` residues.
"""
counter = start
for atom in self.get_atoms():
atom.id = counter
counter += 1
return
def relabel_all(self):
"""Relabels all `Monomers` and `Atoms` using default labeling."""
self.relabel_monomers()
self.relabel_atoms()
return
def make_pdb(self, alt_states=False, inc_ligands=True):
"""Generates a PDB string for the `Polymer`.
Parameters
----------
alt_states : bool, optional
Include alternate conformations for `Monomers` in PDB.
inc_ligands : bool, optional
Includes `Ligands` in PDB.
Returns
-------
pdb_str : str
String of the pdb for the `Polymer`. Generated using information
from the component `Monomers`.
"""
if any(not x.id for x in self._monomers):
self.relabel_monomers()
if self.ligands and inc_ligands:
monomers = self._monomers + self.ligands._monomers
else:
monomers = self._monomers
pdb_str = write_pdb(monomers, self.id, alt_states=alt_states)
return pdb_str
def get_reference_coords(self):
"""Gets list of coordinates of all reference atoms in the `Polymer`.
Returns
-------
ref_coords : [numpy.array]
List has the same length as the `Polymer`.
The first, second and third elements of array i contain the
x, y and z coordinates of the i-th reference atom.
"""
return [x[x.reference_atom].array for x in self._monomers]
class Monomer(BaseAmpal):
"""Groups of `Atoms` that form `Polymers`.
Parameters
----------
atoms : OrderedDict or {OrderedDict}, optional
OrderedDict containing Atoms for the Monomer. OrderedDict
is used to maintain the order items were added to the dictionary.
monomer_id : str, optional
String used to identify the residue.
parent : Polymer, optional
A reference to the `Polymer` containing this `Monomer`.
Attributes
----------
states : dict
Contains an `OrderedDict` of atom information for each
state available for the `Monomer`.
id : str
String used to identify the residue.
parent : Polymer or None
A reference to the `Polymer` containing this `Monomer`.
tags : dict
A dictionary containing information about this AMPAL object.
The tags dictionary is used by AMPAL to cache information
about this object, but is also intended to be used by users
to store any relevant information they have.
"""
def __init__(self, atoms=None, monomer_id=' ', parent=None):
if isinstance(atoms, OrderedDict):
self.states = dict(A=atoms)
self._active_state = 'A'
elif isinstance(atoms, dict):
self.states = atoms
self._active_state = sorted(self.states.keys())[0]
else:
# Sets up dummy states which should be filled later
self.states = {'A': OrderedDict()}
self._active_state = 'A'
self.id = str(monomer_id)
self.parent = parent
self.tags = {}
def __getitem__(self, key):
return self.atoms.__getitem__(key)
def __setitem__(self, key, value):
self.atoms.__setitem__(key, value)
def __iter__(self):
return iter(self.atoms.values())
def __len__(self):
return len(self.atoms)
def __repr__(self):
return '<Monomer containing {} {}>'.format(
len(self.atoms), 'Atom' if len(self.atoms) == 1 else 'Atoms')
@property
def active_state(self):
"""Defines which state dictionary should be used currently."""
return self._active_state
@active_state.setter
def active_state(self, value):
if value in self.states.keys():
self._active_state = value
else:
raise KeyError(
'Selected alternate state is not available please use: {}'.format(
list(self.states.keys()))
)
@property
def atoms(self):
"""Atoms in the currently active state."""
return self.states[self.active_state]
@atoms.setter
def atoms(self, atom_dict):
if not isinstance(atom_dict, OrderedDict):
raise TypeError('Atoms dict must be of the type OrderedDict.')
if self.states:
self.states[self.active_state] = atom_dict
def get_monomers(self):
"""Returns the this `Monomer`.
Notes
-----
This function is only present for consistency in the interface.
"""
return [self]
def get_atoms(self, inc_alt_states=False):
"""Returns all atoms in the `Monomer`.
Parameters
----------
inc_alt_states : bool, optional
If `True`, will return `Atoms` for alternate states.
"""
if inc_alt_states:
return itertools.chain(*[x[1].values() for x in sorted(list(self.states.items()))])
return self.atoms.values()
def make_pdb(self):
"""Generates a PDB string for the `Monomer`."""
pdb_str = write_pdb(
[self], ' ' if not self.parent else self.parent.id)
return pdb_str
def close_monomers(self, group, cutoff=4.0):
"""Returns a list of Monomers from within a cut off distance of the Monomer
Parameters
----------
group: BaseAmpal or Subclass
Group to be search for Monomers that are close to this Monomer.
cutoff: float
Distance cut off.
Returns
-------
nearby_residues: [Monomers]
List of Monomers within cut off distance.
"""
nearby_residues = []
for self_atom in self.atoms.values():
nearby_atoms = group.is_within(cutoff, self_atom)
for res_atom in nearby_atoms:
if res_atom.parent not in nearby_residues:
nearby_residues.append(res_atom.parent)
return nearby_residues
class Atom(object):
"""Object containing atomic coordinates and element information.
Notes
-----
`Atom` is an AMPAL object, but it does not inherit from `BaseAmpal`.
Parameters
----------
coordinates : 3D Vector (tuple, list, numpy.array)
Position of `Atom` in 3D space.
element : str
Element of `Atom`.
atom_id : str
Identifier for `Atom`, usually a number.
res_label : str, optional
Label used in `Monomer` to refer to the `Atom` type i.e. "CA" or "OD1".
occupancy : float, optional
The occupancy of the `Atom`.
bfactor : float, optional
The bfactor of the `Atom`.
charge : str, optional
The point charge of the `Atom`.
state : str
The state of this `Atom`. Used to identify `Atoms` with a
number of conformations.
parent : ampal.Monomer, optional
A reference to the `Monomer` containing this `Atom`.
Attributes
----------
id : str
Identifier for `Atom`, usually a number.
res_label : str
Label used in `Monomer` to refer to the `Atom` type i.e. "CA" or "OD1".
element : str
Element of `Atom`.
parent : ampal.Monomer
A reference to the `Monomer` containing this `Atom`.
state : str
The state of this `Atom`. Used to identify `Atoms` with a
number of conformations.
tags : dict
A dictionary containing information about this AMPAL object.
The tags dictionary is used by AMPAL to cache information
about this object, but is also intended to be used by users
to store any relevant information they have.
"""
def __init__(self, coordinates, element, atom_id=' ', res_label=None,
occupancy=1.0, bfactor=1.0, charge=' ', state='A',
parent=None):
self._vector = numpy.array(coordinates)
self.id = atom_id
self.res_label = res_label
self.element = element
self.parent = parent
self.tags = {
'occupancy': occupancy,
'bfactor': bfactor,
'charge': charge,
'state': state
}
self._ff_id = None
def __repr__(self):
return "<{} Atom{}. Coordinates: ({:.3f}, {:.3f}, {:.3f})>".format(
ELEMENT_DATA[self.element.title()]['name'],
'' if not self.res_label else ' ({})'.format(self.res_label),
self.x, self.y, self.z)
def __getitem__(self, item):
return self._vector[item]
def __setitem__(self, item, value):
self._vector[item] = value
return
def __sub__(self, other):
"""Subtracts coordinates and returns a `numpy.array`.
Notes
-----
Objects themselves remain unchanged.
"""
assert isinstance(other, Atom)
return self._vector - other._vector
def __add__(self, other):
"""Adds coordinates and returns a `numpy.array`.
Notes
-----
Objects themselves remain unchanged.
"""
assert isinstance(other, Atom)
return self._vector + other._vector
@property
def array(self):
"""Converts the atomic coordinate to a `numpy.array`."""
return self._vector
@property
def x(self):
"""The x coordinate."""
return self._vector[0]
@property
def y(self):
"""The y coordinate."""
return self._vector[1]
@property
def z(self):
"""The z coordinate."""
return self._vector[2]
@property
def unique_id(self):
"""Creates a unique ID for the `Atom` based on its parents.
Returns
-------
unique_id : (str, str, str)
(polymer.id, residue.id, atom.id)
"""
chain = self.parent.parent.id
residue = self.parent.id
return chain, residue, self.id
def rotate(self, angle, axis, point=None, radians=False):
"""Rotates `Atom` by `angle`.
Parameters
----------
angle : float
Angle that `Atom` will be rotated.
axis : 3D Vector (tuple, list, numpy.array)
Axis about which the `Atom` will be rotated.
point : 3D Vector (tuple, list, numpy.array), optional
Point that the `axis` lies upon. If `None` then the origin is used.
radians : bool, optional
True if `angle` is defined in radians, False if in degrees.
"""
q = Quaternion.angle_and_axis(angle=angle, axis=axis, radians=radians)
self._vector = q.rotate_vector(v=self._vector, point=point)
return
def translate(self, vector):
"""Translates `Atom`.
Parameters
----------
vector : 3D Vector (tuple, list, numpy.array)
Vector used for translation.
"""
self._vector += numpy.array(vector)
return
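
# Illustrative sketch tying the classes above together: a minimal Polymer
# built from one hand-made Monomer and Atom, then translated along x.
# Not part of the original module.
def _example_build_minimal_polymer():
    """Sketch only: construct, translate and inspect a tiny Polymer."""
    atoms = OrderedDict(
        [('CA', Atom((0.0, 0.0, 0.0), element='C', res_label='CA'))])
    monomer = Monomer(atoms, monomer_id='1')
    polymer = Polymer(monomers=monomer, polymer_id='A')
    polymer.translate((1.0, 0.0, 0.0))
    print(polymer[0]['CA'].array)  # -> [1. 0. 0.]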
__author__ = 'Christopher W. Wood, Kieran L. Hudson'

# ===== End of src/ampal/base_ampal.py =====
import itertools
import networkx
from .data import ELEMENT_DATA
from .geometry import distance, gen_sectors
core_components = [
'ALA', 'ARG', 'ASN', 'ASP', 'CYS', 'GLN', 'GLU', 'GLY', 'HIS', 'ILE',
'LEU', 'LYS', 'MET', 'PHE', 'PRO', 'SER', 'THR', 'TRP', 'TYR', 'VAL',
'HOH']
class Interaction(object):
"""A container for all types of interaction with donor and acceptor.
Parameters
----------
a : ampal.Atom
A member of a pairwise interaction.
b : ampal.Atom
A member of a pairwise interaction.
dist : float
The distance between `a` and `b`.
Attributes
----------
a : ampal.Atom
A member of a pairwise interaction.
b : ampal.Atom
A member of a pairwise interaction.
dist : float
The distance between `Atom` `a` and `b`.
"""
def __init__(self, a, b, dist):
self._a = a
self._b = b
self.dist = dist
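    # Note: hashing and equality treat (a, b) as an ordered pair, so
    # Interaction(a, b) and Interaction(b, a) compare as different objects.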
def __hash__(self):
return hash((self._a, self._b))
def __eq__(self, other):
return (type(self), self._a, self._b) == (type(other), other._a, other._b)
def __repr__(self):
am = self._a.parent
ac = am.parent
bm = self._b.parent
bc = bm.parent
return '<Interaction between {} {}{} and {} {}{}>'.format(
self._a.res_label, am.id, ac.id, self._b.res_label, bm.id, bc.id)
class CovalentBond(Interaction):
"""Defines a covalent bond."""
@property
def a(self):
"""One `Atom` involved in the covalent bond."""
return self._a
@property
def b(self):
"""One `Atom` involved in the covalent bond."""
return self._b
def __repr__(self):
am = self._a.parent
ac = am.parent
bm = self._b.parent
bc = bm.parent
return '<Covalent bond between {}{} {} {} --- {} {} {}{}>'.format(
ac.id, am.id, am.mol_code, self._a.res_label, self._b.res_label,
bm.mol_code, bc.id, bm.id)
class NonCovalentInteraction(Interaction):
""" A container for all non-covalent interaction.
Parameters
----------
donor : ampal.Atom
The donor `Atom` in the interaction.
acceptor : ampal.Atom
The acceptor atom in the interaction.
dist : float
The distance between `Atom` `a` and `b`.
Attributes
----------
donor : ampal.Atom
The donor `Atom` in the interaction.
acceptor : ampal.Atom
The acceptor atom in the interaction.
dist : float
The distance between `Atom` `a` and `b`.
"""
def __init__(self, donor, acceptor, dist):
super().__init__(donor, acceptor, dist)
@property
def donor(self):
"""The donor `Atom` in the interaction."""
return self._a
@property
def acceptor(self):
"""The acceptor in the interaction."""
return self._b
    def __repr__(self):
        # `mol_code` lives on the parent `Monomer`, not on the `Atom` itself.
        return ('<Interaction between {} {}{} (donor) '
                'and {} {}{} (acceptor)>'.format(
                    self.donor.parent.mol_code, self.donor.id,
                    self.donor.parent.id, self.acceptor.parent.mol_code,
                    self.acceptor.id, self.acceptor.parent.id))
class HydrogenBond(NonCovalentInteraction):
"""Defines a hydrogen bond in terms of a donor and an acceptor.
Parameters
----------
donor : ampal.Atom
The donor `Atom` in the interaction.
acceptor : ampal.Atom
The acceptor atom in the interaction.
dist : float
The distance between `Atom` `a` and `b`.
ang_a : float
Angle between the acceptor and the interaction vector.
ang_d : float
Angle between the donor and the interaction vector.
Attributes
----------
donor : ampal.Atom
The donor `Atom` in the interaction.
acceptor : ampal.Atom
The acceptor atom in the interaction.
dist : float
The distance between `Atom` `a` and `b`.
ang_a : float
Angle between the acceptor and the interaction vector.
ang_d : float
Angle between the donor and the interaction vector.
"""
def __init__(self, donor, acceptor, dist, ang_d, ang_a):
super().__init__(donor, acceptor, dist)
self.ang_d = ang_d
self.ang_a = ang_a
@property
def donor_monomer(self):
"""The donor `Monomer` in the interaction."""
return self._a.parent
@property
def acceptor_monomer(self):
"""The acceptor `Monomer` in the interaction."""
return self._b.parent
def __repr__(self):
dm = self.donor.parent
dc = dm.parent
am = self.acceptor.parent
ac = am.parent
return '<Hydrogen Bond between ({}{}) {}-{} ||||| {}-{} ({}{})>'.format(
dm.id, dc.id, dm.mol_code, self.donor.res_label,
self.acceptor.res_label, am.mol_code, am.id, ac.id)
def covalent_bonds(atoms, threshold=1.1):
"""Returns all the covalent bonds in a list of `Atom` pairs.
Notes
-----
Uses information `ELEMENT_DATA`, which can be accessed directly
through this module i.e. `isambard.ampal.interactions.ELEMENT_DATA`.
Parameters
----------
atoms : [(`Atom`, `Atom`)]
List of pairs of `Atoms`.
threshold : float, optional
Allows deviation from ideal covalent bond distance to be included.
For example, a value of 1.1 would allow interactions up to 10% further
from the ideal distance to be included.
"""
bonds = []
for a, b in atoms:
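        # The ideal bond length is approximated as the sum of the two atomic
        # radii; ELEMENT_DATA appears to store radii in picometres, hence the
        # division by 100 to give a distance in angstroms.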
bond_distance = (
ELEMENT_DATA[a.element.title()]['atomic radius'] + ELEMENT_DATA[
b.element.title()]['atomic radius']) / 100
dist = distance(a._vector, b._vector)
if dist <= bond_distance * threshold:
bonds.append(CovalentBond(a, b, dist))
return bonds
def find_covalent_bonds(ampal, max_range=2.2, threshold=1.1, tag=True):
"""Finds all covalent bonds in the AMPAL object.
Parameters
----------
ampal : AMPAL Object
Any AMPAL object with a `get_atoms` method.
max_range : float, optional
Used to define the sector size, so interactions at longer ranges
will not be found.
threshold : float, optional
Allows deviation from ideal covalent bond distance to be included.
For example, a value of 1.1 would allow interactions up to 10% further
from the ideal distance to be included.
tag : bool, optional
If `True`, will add the covalent bond to the tags dictionary of
each `Atom` involved in the interaction under the `covalent_bonds`
key.
"""
sectors = gen_sectors(ampal.get_atoms(), max_range * 1.1)
bonds = []
for sector in sectors.values():
atoms = itertools.combinations(sector, 2)
bonds.extend(covalent_bonds(atoms, threshold=threshold))
bond_set = list(set(bonds))
if tag:
for bond in bond_set:
a, b = bond.a, bond.b
if 'covalent_bonds' not in a.tags:
a.tags['covalent_bonds'] = [b]
else:
a.tags['covalent_bonds'].append(b)
if 'covalent_bonds' not in b.tags:
b.tags['covalent_bonds'] = [a]
else:
b.tags['covalent_bonds'].append(a)
return bond_set
def generate_covalent_bond_graph(covalent_bonds):
"""Generates a graph of the covalent bond network described by the interactions.
Parameters
----------
covalent_bonds: [CovalentBond]
List of `CovalentBond`.
Returns
-------
bond_graph: networkx.Graph
A graph of the covalent bond network.
"""
bond_graph = networkx.Graph()
for inter in covalent_bonds:
bond_graph.add_edge(inter.a, inter.b)
return bond_graph
def generate_bond_subgraphs_from_break(bond_graph, atom1, atom2):
"""Splits the bond graph between two atoms to producing subgraphs.
Notes
-----
This will not work if there are cycles in the bond graph.
Parameters
----------
bond_graph: networkx.Graph
Graph of covalent bond network
atom1: isambard.ampal.Atom
First atom in the bond.
atom2: isambard.ampal.Atom
Second atom in the bond.
Returns
-------
subgraphs: [networkx.Graph]
A list of subgraphs generated when a bond is broken in the covalent
bond network.
"""
bond_graph.remove_edge(atom1, atom2)
    try:
        # `connected_component_subgraphs` was removed in NetworkX 2.4; build
        # subgraph views over the connected components instead, which matches
        # the old `copy=False` behaviour.
        subgraphs = [bond_graph.subgraph(c)
                     for c in networkx.connected_components(bond_graph)]
finally:
# Add edge
bond_graph.add_edge(atom1, atom2)
return subgraphs
__author__ = 'Kieran L. Hudson, Christopher W. Wood, Gail J. Bartlett' | AMPAL | /AMPAL-1.4.0.tar.gz/AMPAL-1.4.0/src/ampal/interactions.py | interactions.py |
from collections import OrderedDict
import itertools
import pathlib
from .base_ampal import Atom
from .assembly import AmpalContainer, Assembly
from .protein import Polypeptide, Residue
from .nucleic_acid import Polynucleotide, Nucleotide
from .ligands import Ligand, LigandGroup
from .amino_acids import standard_amino_acids
from .data import PDB_ATOM_COLUMNS
def load_pdb(pdb, path=True, pdb_id='', ignore_end=False):
"""Converts a PDB file into an AMPAL object.
Parameters
----------
pdb : str
Either a path to a PDB file or a string containing PDB
format structural data.
path : bool, optional
If `true`, flags `pdb` as a path and not a PDB string.
pdb_id : str, optional
Identifier for the `Assembly`.
ignore_end : bool, optional
If `false`, parsing of the file will stop when an "END"
record is encountered.
Returns
-------
ampal : ampal.Assembly or ampal.AmpalContainer
AMPAL object that contains the structural information from
the PDB file provided. If the PDB file has a single state
then an `Assembly` will be returned, otherwise an
`AmpalContainer` will be returned.
"""
pdb_p = PdbParser(pdb, path=path, pdb_id=pdb_id, ignore_end=ignore_end)
return pdb_p.make_ampal()
class PdbParser(object):
"""Parses a PDB file and produces and AMPAL Assembly.
Parameters
----------
pdb : str
Either a path to a PDB file or a string containing PDB
format structural data.
path : bool, optional
If `true`, flags `pdb` as a path and not a PDB string.
pdb_id : str, optional
Identifier for the `Assembly`.
ignore_end : bool, optional
If `false`, parsing of the file will stop when an "END"
record is encountered.
Attributes
----------
proc_functions : dict
Keys are PDB labels i.e. "ATOM" or "END", values are the
function that processes that specific line.
id : str
Identifier for the `Assembly`.
pdb_lines : [str]
        Input PDB split into lines.
new_labels : bool
Indicates if new atom or residue labels have been found while
parsing the PDB file.
state : int
Current state being appended to. This is used on multi-state
files like NMR structures.
pdb_parse_tree : dict
This dictionary represents the parse tree of the PDB file.
Each line of the structure is broken down into a key, the
entry label, and a value, the data.
current_line : int
The line that is currently being parsed.
ignore_end : bool, optional
If `false`, parsing of the file will stop when an "END"
record is encountered.
"""
def __init__(self, pdb, path=True, pdb_id='', ignore_end=False):
self.proc_functions = {
'ATOM': self.proc_atom,
'HETATM': self.proc_atom,
'ENDMDL': self.change_state,
'END': self.end
}
if path:
pdb_path = pathlib.PurePath(pdb)
with open(str(pdb_path), 'r') as inf:
pdb_str = inf.read()
self.id = pdb_path.stem
else:
pdb_str = pdb
self.id = pdb_id
self.pdb_lines = pdb_str.splitlines()
self.new_labels = False
self.state = 0
self.pdb_parse_tree = None
self.current_line = None
self.ignore_end = ignore_end
self.parse_pdb_file()
def parse_pdb_file(self):
"""Runs the PDB parser."""
self.pdb_parse_tree = {'info': {},
'data': {
self.state: {}}
}
try:
for line in self.pdb_lines:
self.current_line = line
record_name = line[:6].strip()
if record_name in self.proc_functions:
self.proc_functions[record_name]()
else:
if record_name not in self.pdb_parse_tree['info']:
self.pdb_parse_tree['info'][record_name] = []
self.pdb_parse_tree['info'][record_name].append(line)
except EOFError:
# Raised by END record
pass
return
def proc_atom(self):
"""Processes an "ATOM" or "HETATM" record."""
atom_data = self.proc_line_coordinate(self.current_line)
(at_type, at_ser, at_name, alt_loc, res_name, chain_id, res_seq,
i_code, x, y, z, occupancy, temp_factor, element, charge) = atom_data
# currently active state
a_state = self.pdb_parse_tree['data'][self.state]
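        # Parse-tree shape: a_state[chain_id] = (set of chain labels,
        # OrderedDict mapping res_id -> (set of residue labels,
        # OrderedDict mapping atom serial -> [atom_data])).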
res_id = (res_seq, i_code)
if chain_id not in a_state:
a_state[chain_id] = (set(), OrderedDict())
if res_id not in a_state[chain_id][1]:
a_state[chain_id][1][res_id] = (set(), OrderedDict())
if at_type == 'ATOM':
if res_name in standard_amino_acids.values():
poly = 'P'
else:
poly = 'N'
else:
poly = 'H'
a_state[chain_id][0].add((chain_id, at_type, poly))
a_state[chain_id][1][res_id][0].add(
(at_type, res_seq, res_name, i_code))
if at_ser not in a_state[chain_id][1][res_id][1]:
a_state[chain_id][1][res_id][1][at_ser] = [atom_data]
else:
a_state[chain_id][1][res_id][1][at_ser].append(atom_data)
return
def change_state(self):
"""Increments current state and adds a new dict to the parse tree."""
self.state += 1
self.pdb_parse_tree['data'][self.state] = {}
return
def end(self):
"""Processes an "END" record."""
if not self.ignore_end:
raise EOFError
else:
return
def proc_line_coordinate(self, line):
"""Extracts data from columns in ATOM/HETATM record."""
at_type = line[0:6].strip() # 0
at_ser = int(line[6:11].strip()) # 1
at_name = line[12:16].strip() # 2
alt_loc = line[16].strip() # 3
res_name = line[17:20].strip() # 4
chain_id = line[21].strip() # 5
res_seq = int(line[22:26].strip()) # 6
i_code = line[26].strip() # 7
x = float(line[30:38].strip()) # 8
y = float(line[38:46].strip()) # 9
z = float(line[46:54].strip()) # 10
occupancy = float(line[54:60].strip()) # 11
temp_factor = float(line[60:66].strip()) # 12
element = line[76:78].strip() # 13
charge = line[78:80].strip() # 14
if at_name not in PDB_ATOM_COLUMNS:
PDB_ATOM_COLUMNS[at_name] = line[12:16]
self.new_labels = True
return (at_type, at_ser, at_name, alt_loc, res_name, chain_id, res_seq,
i_code, x, y, z, occupancy, temp_factor, element, charge)
    # Generate AMPAL objects from the parse tree
def make_ampal(self):
"""Generates an AMPAL object from the parse tree.
Notes
-----
Will create an `Assembly` if there is a single state in the
        parse tree or an `AmpalContainer` if there is more than one.
"""
data = self.pdb_parse_tree['data']
if len(data) > 1:
ac = AmpalContainer(id=self.id)
for state, chains in sorted(data.items()):
if chains:
ac.append(self.proc_state(chains, self.id +
'_state_{}'.format(state + 1)))
return ac
elif len(data) == 1:
return self.proc_state(data[0], self.id)
else:
raise ValueError('Empty parse tree, check input PDB format.')
def proc_state(self, state_data, state_id):
"""Processes a state into an `Assembly`.
Parameters
----------
state_data : dict
Contains information about the state, including all
the per line structural data.
state_id : str
ID given to `Assembly` that represents the state.
"""
assembly = Assembly(assembly_id=state_id)
for k, chain in sorted(state_data.items()):
assembly._molecules.append(self.proc_chain(chain, assembly))
return assembly
def proc_chain(self, chain_info, parent):
"""Converts a chain into a `Polymer` type object.
Parameters
----------
chain_info : (set, OrderedDict)
Contains a set of chain labels and atom records.
parent : ampal.Assembly
`Assembly` used to assign `parent` on created
`Polymer`.
Raises
------
ValueError
Raised if multiple or unknown atom types found
within the same chain.
AttributeError
Raised if unknown `Monomer` type encountered.
"""
hetatom_filters = {
'nc_aas': self.check_for_non_canonical
}
polymer = False
chain_labels, chain_data = chain_info
chain_label = list(chain_labels)[0]
monomer_types = {x[2] for x in chain_labels if x[2]}
if ('P' in monomer_types) and ('N' in monomer_types):
raise ValueError(
'Malformed PDB, multiple "ATOM" types in a single chain.')
# Changes Polymer type based on chain composition
if 'P' in monomer_types:
polymer_class = Polypeptide
polymer = True
elif 'N' in monomer_types:
polymer_class = Polynucleotide
polymer = True
elif 'H' in monomer_types:
polymer_class = LigandGroup
else:
            raise AttributeError('Malformed parse tree, check input PDB.')
chain = polymer_class(polymer_id=chain_label[0], parent=parent)
# Changes where the ligands should go based on the chain composition
if polymer:
chain.ligands = LigandGroup(
polymer_id=chain_label[0], parent=parent)
ligands = chain.ligands
else:
ligands = chain
for residue in chain_data.values():
res_info = list(residue[0])[0]
if res_info[0] == 'ATOM':
chain._monomers.append(self.proc_monomer(residue, chain))
elif res_info[0] == 'HETATM':
                mon_cls = None
                on_chain = False
                for filt_func in hetatom_filters.values():
                    filt_res = filt_func(residue)
                    if filt_res:
                        mon_cls, on_chain = filt_res
                        break
                # Fall back to the generic `Ligand` class if no HETATM
                # filter matched.
                if mon_cls is None:
                    mon_cls = Ligand
if on_chain:
chain._monomers.append(self.proc_monomer(
residue, chain, mon_cls=mon_cls))
else:
ligands._monomers.append(self.proc_monomer(
residue, chain, mon_cls=mon_cls))
else:
raise ValueError('Malformed PDB, unknown record type for data')
return chain
def proc_monomer(self, monomer_info, parent, mon_cls=False):
"""Processes a records into a `Monomer`.
Parameters
----------
monomer_info : (set, OrderedDict)
Labels and data for a monomer.
parent : ampal.Polymer
`Polymer` used to assign `parent` on created
`Monomer`.
mon_cls : `Monomer class or subclass`, optional
A `Monomer` class can be defined explicitly.
"""
monomer_labels, monomer_data = monomer_info
if len(monomer_labels) > 1:
raise ValueError(
'Malformed PDB, single monomer id with '
'multiple labels. {}'.format(monomer_labels))
else:
monomer_label = list(monomer_labels)[0]
if mon_cls:
monomer_class = mon_cls
het = True
elif monomer_label[0] == 'ATOM':
if monomer_label[2] in standard_amino_acids.values():
monomer_class = Residue
else:
monomer_class = Nucleotide
het = False
else:
raise ValueError('Unknown Monomer type.')
monomer = monomer_class(
atoms=None, mol_code=monomer_label[2], monomer_id=monomer_label[1],
insertion_code=monomer_label[3], is_hetero=het, parent=parent
)
monomer.states = self.gen_states(monomer_data.values(), monomer)
monomer._active_state = sorted(monomer.states.keys())[0]
return monomer
def gen_states(self, monomer_data, parent):
"""Generates the `states` dictionary for a `Monomer`.
monomer_data : list
A list of atom data parsed from the input PDB.
parent : ampal.Monomer
`Monomer` used to assign `parent` on created
`Atoms`.
"""
states = {}
for atoms in monomer_data:
for atom in atoms:
state = 'A' if not atom[3] else atom[3]
if state not in states:
states[state] = OrderedDict()
states[state][atom[2]] = Atom(
tuple(atom[8:11]), atom[13], atom_id=atom[1],
res_label=atom[2], occupancy=atom[11], bfactor=atom[12],
charge=atom[14], state=state, parent=parent)
        # If alternate conformation states exist with differing numbers of
        # atoms, populate every state with the full complement of atoms
        # (missing atoms are copied from the first state, sorted by label).
states_len = [(k, len(x)) for k, x in states.items()]
if (len(states) > 1) and (len(set([x[1] for x in states_len])) > 1):
for t_state, t_state_d in states.items():
new_s_dict = OrderedDict()
for k, v in states[sorted(states_len,
key=lambda x: x[0])[0][0]].items():
if k not in t_state_d:
c_atom = Atom(
v._vector, v.element, atom_id=v.id,
res_label=v.res_label,
occupancy=v.tags['occupancy'],
bfactor=v.tags['bfactor'], charge=v.tags['charge'],
state=t_state[0], parent=v.parent)
new_s_dict[k] = c_atom
else:
new_s_dict[k] = t_state_d[k]
states[t_state] = new_s_dict
return states
# HETATM filters
@staticmethod
def check_for_non_canonical(residue):
"""Checks to see if the residue is non-canonical."""
res_label = list(residue[0])[0][2]
atom_labels = {x[2] for x in itertools.chain(
*residue[1].values())} # Used to find unnatural aas
if (all(x in atom_labels for x in ['N', 'CA', 'C', 'O'])) and (
len(res_label) == 3):
return Residue, True
return None
__author__ = "Christopher W. Wood" | AMPAL | /AMPAL-1.4.0.tar.gz/AMPAL-1.4.0/src/ampal/pdb_parser.py | pdb_parser.py |
import json
import requests
import os
from .data import AMINO_ACIDS_DATA
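# NOTE: `_amino_acids_json_path` is used by `add_amino_acid_to_json` below but
# is not defined elsewhere in this module. A minimal sketch, assuming the
# packaged amino_acids.json sits in a `data` directory next to this file:
_amino_acids_json_path = os.path.join(
    os.path.dirname(__file__), 'data', 'amino_acids.json')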
water_mass = 18.01528
ideal_backbone_bond_lengths = {
# Ideal bond distances from:
# Schulz, G. E, and R. Heiner Schirmer. Principles Of Protein Structure. New York: Springer-Verlag, 1979.
'n_ca': 1.47,
'ca_c': 1.53,
'c_o': 1.24,
# peptide bond length for residues.
'c_n': 1.32,
}
ideal_backbone_bond_angles = {
# Ideal bond angles from:
# Schulz, G. E, and R. Heiner Schirmer. Principles Of Protein Structure. New York: Springer-Verlag, 1979.
'trans': {
'n_ca_c': 110.0,
'ca_c_o': 121.0,
'ca_c_n': 114.0,
'c_n_ca': 123.0,
'o_c_n': 125.0,
},
'cis': {
'n_ca_c': 110.0,
'ca_c_o': 119.0,
'ca_c_n': 118.0,
'c_n_ca': 126.0,
'o_c_n': 123.0
}
}
residue_mwt = {
'A': 71.0779, 'C': 103.1429, 'D': 115.0874, 'E': 129.114, 'F': 147.1739,
'G': 57.0513, 'H': 137.1393, 'I': 113.1576, 'K': 128.1723, 'L': 113.1576,
'M': 131.1961, 'N': 114.1026, 'P': 97.1152, 'Q': 128.1292, 'R': 156.1857,
'S': 87.0773, 'T': 101.1039, 'V': 99.1311, 'W': 186.2099, 'Y': 163.1733,
'X': 57.0513
}
residue_charge = {
'A': 0, 'C': -1, 'D': -1, 'E': -1, 'F': 0,
'G': 0, 'H': +1, 'I': 0, 'K': +1, 'L': 0,
'M': 0, 'N': 0, 'P': 0, 'Q': 0, 'R': +1,
'S': 0, 'T': 0, 'V': 0, 'W': 0, 'Y': -1,
'N-term': +1, 'C-term': -1, 'X': 0
}
residue_pka = {
'A': 0.0, 'C': 8.3, 'D': 3.65, 'E': 4.25, 'F': 0.0,
'G': 0.0, 'H': 6.1, 'I': 0.0, 'K': 10.53, 'L': 0.0,
'M': 0.0, 'N': 0.0, 'P': 0.0, 'Q': 0.0, 'R': 12.48,
'S': 0.0, 'T': 0.0, 'V': 0.0, 'W': 0.0, 'Y': 10.1,
'N-term': 8.0, 'C-term': 3.1, 'X': 0.0
}
residue_ext_280 = {
'A': 0, 'C': 0, 'D': 0, 'E': 0, 'F': 0,
'G': 0, 'H': 0, 'I': 0, 'K': 0, 'L': 0,
'M': 0, 'N': 0, 'P': 0, 'Q': 0, 'R': 0,
'S': 0, 'T': 0, 'V': 0, 'W': 5690, 'Y': 1280,
'X': 0
}
standard_amino_acids = {
'A': 'ALA', 'C': 'CYS', 'D': 'ASP', 'E': 'GLU', 'F': 'PHE',
'G': 'GLY', 'H': 'HIS', 'I': 'ILE', 'K': 'LYS', 'L': 'LEU',
'M': 'MET', 'N': 'ASN', 'P': 'PRO', 'Q': 'GLN', 'R': 'ARG',
'S': 'SER', 'T': 'THR', 'V': 'VAL', 'W': 'TRP', 'Y': 'TYR'
}
side_chain_dihedrals = {
'ARG': [
['N', 'CA', 'CB', 'CG', 'CD', 'NE', 'CZ', 'NH1', 'NH2'],
['CA', 'CB', 'CG', 'CD', 'NE', 'CZ', 'NH1', 'NH2'],
['CB', 'CG', 'CD', 'NE', 'CZ', 'NH1', 'NH2'],
['CG', 'CD', 'NE', 'CZ', 'NH1', 'NH2']],
'ASN': [
['N', 'CA', 'CB', 'CG', 'OD1', 'ND2'],
['CA', 'CB', 'CG', 'OD1', 'ND2']],
'ASP': [
['N', 'CA', 'CB', 'CG', 'OD1', 'OD2'],
['CA', 'CB', 'CG', 'OD1', 'OD2']],
'CYS': [['N', 'CA', 'CB', 'SG']],
'GLN': [
['N', 'CA', 'CB', 'CG', 'CD', 'OE1', 'NE2'],
['CA', 'CB', 'CG', 'CD', 'OE1', 'NE2'],
['CB', 'CG', 'CD', 'OE1', 'NE2']],
'GLU': [
['N', 'CA', 'CB', 'CG', 'CD', 'OE1', 'OE2'],
['CA', 'CB', 'CG', 'CD', 'OE1', 'OE2'],
['CB', 'CG', 'CD', 'OE1', 'OE2']],
'HIS': [
['N', 'CA', 'CB', 'CG', 'ND1', 'CE1', 'ND2'],
['CA', 'CB', 'CG', 'ND1', 'CE1', 'ND2']],
'ILE': [
['N', 'CA', 'CB', 'CG1', 'CG2', 'CD1'],
['CA', 'CB', 'CG1', 'CD1', 'CG2']],
'LEU': [
['N', 'CA', 'CB', 'CG', 'CD1', 'CD2'],
['CA', 'CB', 'CG', 'CD1', 'CD2']],
'LYS': [
['N', 'CA', 'CB', 'CG', 'CD', 'CE', 'NZ'],
['CA', 'CB', 'CG', 'CD', 'CE', 'NZ'],
['CB', 'CG', 'CD', 'CE', 'NZ'],
['CG', 'CD', 'CE', 'NZ']],
'MET': [
['N', 'CA', 'CB', 'CG', 'SD', 'CE'],
['CA', 'CB', 'CG', 'SD', 'CE'],
['CB', 'CG', 'SD', 'CE']],
'PHE': [
['N', 'CA', 'CB', 'CG', 'CD1', 'CD2', 'CE1', 'CE2', 'CZ'],
['CA', 'CB', 'CG', 'CD1', 'CD2', 'CE1', 'CE2', 'CZ']],
'PRO': [
['N', 'CA', 'CB', 'CG', 'CD'],
['CA', 'CB', 'CG', 'CD']],
'SER': [['N', 'CA', 'CB', 'OG']],
'THR': [['N', 'CA', 'CB', 'OG1', 'CG2']],
'TRP': [
['N', 'CA', 'CB', 'CG', 'CD1', 'CD2', 'CE2',
'CE3', 'NE1', 'CZ2', 'CZ3', 'CH2'],
['CA', 'CB', 'CG', 'CD1', 'CD2', 'CE2', 'CE3', 'NE1', 'CZ2', 'CZ3', 'CH2']],
'TYR': [
['N', 'CA', 'CB', 'CG', 'CD1', 'CD2', 'CE1', 'CE2', 'CZ', 'OH'],
['CA', 'CB', 'CG', 'CD1', 'CD2', 'CE1', 'CE2', 'CZ', 'OH']],
'VAL': [['N', 'CA', 'CB', 'CG1', 'CG2']]
}
# Data taken from http://web.expasy.org/protscale/ unless otherwise stated. Original reference also given.
# Levitt M. Biochemistry 17:4277-4285(1978)
a_helix_Levitt = {
'A': 1.29, 'C': 1.11, 'D': 1.04, 'E': 1.44, 'F': 1.07,
'G': 0.56, 'H': 1.22, 'I': 0.97, 'K': 1.23, 'L': 1.3,
'M': 1.47, 'N': 0.9, 'P': 0.52, 'Q': 1.27, 'R': 0.96,
'S': 0.82, 'T': 0.82, 'V': 0.91, 'W': 0.99, 'Y': 0.72
}
# Janin J. Nature 277:491-492(1979)
accessibility_Janin = {
'A': 6.6, 'C': 0.9, 'D': 7.7, 'E': 5.7, 'F': 2.4,
'G': 6.7, 'H': 2.5, 'I': 2.8, 'K': 10.3, 'L': 4.8,
'M': 1.0, 'N': 6.7, 'P': 4.8, 'Q': 5.2, 'R': 4.5,
'S': 9.4, 'T': 7.0, 'V': 4.5, 'W': 1.4, 'Y': 5.1
}
# Bhaskaran R., Ponnuswamy P.K. Int. J. Pept. Protein. Res. 32:242-255(1988)
avg_flex_index = {
'A': 0.36, 'C': 0.35, 'D': 0.51, 'E': 0.5, 'F': 0.31,
'G': 0.54, 'H': 0.32, 'I': 0.46, 'K': 0.47, 'L': 0.37,
'M': 0.3, 'N': 0.46, 'P': 0.51, 'Q': 0.49, 'R': 0.53,
'S': 0.51, 'T': 0.44, 'V': 0.39, 'W': 0.31, 'Y': 0.42
}
# Levitt M. Biochemistry 17:4277-4285(1978)
beta_sheet_Levitt = {
'A': 0.9, 'C': 0.74, 'D': 0.72, 'E': 0.75, 'F': 1.32,
'G': 0.92, 'H': 1.08, 'I': 1.45, 'K': 0.77, 'L': 1.02,
'M': 0.97, 'N': 0.76, 'P': 0.64, 'Q': 0.8, 'R': 0.99,
'S': 0.95, 'T': 1.21, 'V': 1.49, 'W': 1.14, 'Y': 1.25
}
# Levitt M. Biochemistry 17:4277-4285(1978)
beta_turn_Levitt = {
'A': 0.77, 'C': 0.81, 'D': 1.41, 'E': 0.99, 'F': 0.59,
'G': 1.64, 'H': 0.68, 'I': 0.51, 'K': 0.96, 'L': 0.58,
'M': 0.41, 'N': 1.28, 'P': 1.91, 'Q': 0.98, 'R': 0.88,
'S': 1.32, 'T': 1.04, 'V': 0.47, 'W': 0.76, 'Y': 1.05
}
# Zimmerman J.M., Eliezer N., Simha R. J. Theor. Biol. 21:170-201(1968)
bulkiness = {
'A': 11.5, 'C': 13.46, 'D': 11.68, 'E': 13.57, 'F': 19.8,
'G': 3.4, 'H': 13.69, 'I': 21.4, 'K': 15.71, 'L': 21.4,
'M': 16.25, 'N': 12.82, 'P': 17.43, 'Q': 14.45, 'R': 14.28,
'S': 9.47, 'T': 15.77, 'V': 21.57, 'W': 21.67, 'Y': 18.03
}
# Kyte J., Doolittle R.F. J. Mol. Biol. 157:105-132(1982)
hydropathicity = {
'A': 1.8, 'C': 2.5, 'D': -3.5, 'E': -3.5, 'F': 2.8,
'G': -0.4, 'H': -3.2, 'I': 4.5, 'K': -3.9, 'L': 3.8,
'M': 1.9, 'N': -3.5, 'P': -1.6, 'Q': -3.5, 'R': -4.5,
'S': -0.8, 'T': -0.7, 'V': 4.2, 'W': -0.9, 'Y': -1.3
}
# http://biopython.org/DIST/docs/api/Bio.PDB.DSSP%27-pysrc.html
# Sander & Rost, (1994), Proteins, 20:216-226
max_asa = {
'A': 106.0, 'C': 135.0, 'D': 163.0, 'E': 194.0, 'F': 197.0,
'G': 84.0, 'H': 184.0, 'I': 169.0, 'K': 205.0, 'L': 164.0,
'M': 188.0, 'N': 157.0, 'P': 136.0, 'Q': 198.0, 'R': 135.0,
'S': 130.0, 'T': 142.0, 'V': 142.0, 'W': 227.0, 'Y': 222.0
}
# Grantham R. Science 185:862-864(1974)
polarity_Grantham = {
'A': 8.1, 'C': 5.5, 'D': 13.0, 'E': 12.3, 'F': 5.2,
'G': 9.0, 'H': 10.4, 'I': 5.2, 'K': 11.3, 'L': 4.9,
'M': 5.7, 'N': 11.6, 'P': 8.0, 'Q': 10.5, 'R': 10.5,
'S': 9.2, 'T': 8.6, 'V': 5.9, 'W': 5.4, 'Y': 6.2
}
# Zimmerman J.M., Eliezer N., Simha R. J. Theor. Biol. 21:170-201(1968)
polarity_Zimmerman = {
'A': 0.0, 'C': 1.48, 'D': 49.7, 'E': 49.9, 'F': 0.35,
'G': 0.0, 'H': 51.6, 'I': 0.13, 'K': 49.5, 'L': 0.13,
'M': 1.43, 'N': 3.38, 'P': 1.58, 'Q': 3.53, 'R': 52.0,
'S': 1.67, 'T': 1.66, 'V': 0.13, 'W': 2.1, 'Y': 1.61
}
# Fraga S. Can. J. Chem. 60:2606-2610(1982)
recognition_factors = {
'A': 78.0, 'C': 89.0, 'D': 81.0, 'E': 78.0, 'F': 81.0,
'G': 84.0, 'H': 84.0, 'I': 88.0, 'K': 87.0, 'L': 85.0,
'M': 80.0, 'N': 94.0, 'P': 91.0, 'Q': 87.0, 'R': 95.0,
'S': 107.0, 'T': 93.0, 'V': 89.0, 'W': 104.0, 'Y': 84.0
}
# Jones. D.D. J. Theor. Biol. 50:167-184(1975)
refractivity = {
'A': 4.34, 'C': 35.77, 'D': 12.0, 'E': 17.26, 'F': 29.4,
'G': 0.0, 'H': 21.81, 'I': 19.06, 'K': 21.29, 'L': 18.78,
'M': 21.64, 'N': 13.28, 'P': 10.93, 'Q': 17.56, 'R': 26.66,
'S': 6.35, 'T': 11.01, 'V': 13.92, 'W': 42.53, 'Y': 31.53
}
# Dayhoff M.O., Schwartz R.M., Orcutt B.C. In "Atlas of Protein Sequence and Structure", Vol.5, Suppl.3 (1978)
relative_mutability = {
'A': 100.0, 'C': 20.0, 'D': 106.0, 'E': 102.0, 'F': 41.0,
'G': 49.0, 'H': 66.0, 'I': 96.0, 'K': 56.0, 'L': 40.0,
'M': 94.0, 'N': 134.0, 'P': 56.0, 'Q': 93.0, 'R': 65.0,
'S': 120.0, 'T': 97.0, 'V': 74.0, 'W': 18.0, 'Y': 41.0
}
# Meek J.L. Proc. Natl. Acad. Sci. USA 77:1632-1636(1980)
retention_coeff_hplc_pH7pt4 = {
'A': 0.5, 'C': -6.8, 'D': -8.2, 'E': -16.9, 'F': 13.2,
'G': 0.0, 'H': -3.5, 'I': 13.9, 'K': 0.1, 'L': 8.8,
'M': 4.8, 'N': 0.8, 'P': 6.1, 'Q': -4.8, 'R': 0.8,
'S': 1.2, 'T': 2.7, 'V': 2.7, 'W': 14.9, 'Y': 6.1
}
# Zhao, G., London E. Protein Sci. 15:1987-2001(2006)
transmembrane_tendancy = {
'A': 0.38, 'C': -0.3, 'D': -3.27, 'E': -2.9, 'F': 1.98,
'G': -0.19, 'H': -1.44, 'I': 1.97, 'K': -3.46, 'L': 1.82,
'M': 1.4, 'N': -1.62, 'P': -1.44, 'Q': -1.84, 'R': -2.57,
'S': -0.53, 'T': -0.32, 'V': 1.46, 'W': 1.53, 'Y': 0.49
}
# Bairoch A. Release notes for UniProtKB/Swiss-Prot release 2013_04 - April 2013
uniprot_composition_2013 = {
'A': 8.25, 'C': 1.37, 'D': 5.45, 'E': 6.75, 'F': 3.86,
'G': 7.07, 'H': 2.27, 'I': 5.96, 'K': 5.84, 'L': 9.66,
'M': 2.42, 'N': 4.06, 'P': 4.7, 'Q': 3.93, 'R': 5.53,
'S': 6.56, 'T': 5.34, 'V': 6.87, 'W': 1.08, 'Y': 2.92
}
number_of_codons = {
'A': 4, 'C': 1, 'D': 2, 'E': 2, 'F': 2,
'G': 4, 'H': 2, 'I': 3, 'K': 2, 'L': 6,
'M': 1, 'N': 2, 'P': 4, 'Q': 2, 'R': 6,
'S': 6, 'T': 4, 'V': 4, 'W': 1, 'Y': 2
}
# pI, pk_COOH, pK_NH3, pK_Rgroup all taken from:
# http://www.sigmaaldrich.com/life-science/metabolomics/learning-center/amino-acid-reference-chart.html#prop'
# D. R. Lide, Handbook of Chemistry and Physics, 72nd Edition, CRC Press, Boca Raton, FL, 1991.
pI = {
'A': 6.0, 'C': 5.07, 'D': 2.77, 'E': 3.22, 'F': 5.48,
'G': 5.97, 'H': 7.59, 'I': 6.02, 'K': 9.74, 'L': 5.98,
'M': 5.74, 'N': 5.41, 'P': 6.3, 'Q': 5.65, 'R': 10.76,
'S': 5.68, 'T': 5.6, 'V': 5.96, 'W': 5.89, 'Y': 5.66
}
pK_COOH = {
'A': 2.34, 'C': 1.96, 'D': 1.88, 'E': 2.19, 'F': 1.83,
'G': 2.34, 'H': 1.82, 'I': 2.36, 'K': 2.18, 'L': 2.36,
'M': 2.28, 'N': 2.02, 'P': 1.99, 'Q': 2.17, 'R': 2.17,
'S': 2.21, 'T': 2.09, 'V': 2.32, 'W': 2.83, 'Y': 2.2
}
pK_NH3 = {
'A': 9.69, 'C': 10.28, 'D': 9.6, 'E': 9.67, 'F': 9.13,
'G': 9.6, 'H': 9.17, 'I': 9.6, 'K': 8.95, 'L': 9.6,
'M': 9.21, 'N': 8.8, 'P': 10.6, 'Q': 9.13, 'R': 9.04,
'S': 9.15, 'T': 9.1, 'V': 9.62, 'W': 9.39, 'Y': 9.11
}
pK_Rgroup = {
'A': None, 'C': 8.18, 'D': 3.65, 'E': 4.25, 'F': None,
'G': None, 'H': 6.0, 'I': None, 'K': 10.53, 'L': None,
'M': None, 'N': None, 'P': None, 'Q': None, 'R': 12.48,
'S': None, 'T': None, 'V': None, 'W': None, 'Y': 10.07
}
def get_aa_code(aa_letter):
""" Get three-letter aa code if possible. If not, return None.
If three-letter code is None, will have to find this later from the filesystem.
Parameters
----------
aa_letter : str
One-letter amino acid code.
Returns
-------
aa_code : str, or None
Three-letter aa code.
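    Examples
    --------
    >>> get_aa_code('A')
    'ALA'
    >>> get_aa_code('X') is None
    True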
"""
aa_code = None
if aa_letter != 'X':
for key, val in standard_amino_acids.items():
if key == aa_letter:
aa_code = val
return aa_code
def get_aa_letter(aa_code):
""" Get one-letter version of aa_code if possible. If not, return 'X'.
Parameters
----------
aa_code : str
Three-letter amino acid code.
Returns
-------
aa_letter : str
One-letter aa code.
Default value is 'X'.
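    Examples
    --------
    >>> get_aa_letter('ALA')
    'A'
    >>> get_aa_letter('???')
    'X'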
"""
aa_letter = 'X'
for key, val in standard_amino_acids.items():
if val == aa_code:
aa_letter = key
return aa_letter
def get_aa_info(code):
"""Get dictionary of information relating to a new amino acid code not currently in the database.
Notes
-----
    Use this function to get a dictionary that can then be passed to
    add_amino_acid_to_json(), filling in the rows of the amino_acid table
    for the new amino acid code.
Parameters
----------
code : str
Three-letter amino acid code.
Raises
------
IOError
If unable to locate the page associated with the amino acid name on the PDBE site.
Returns
-------
aa_dict : dict
Keys are AminoAcidDB field names.
Values are the str values for the new amino acid, scraped from the PDBE if possible. None if not found.
"""
letter = 'X'
# Try to get content from PDBE.
url_string = 'http://www.ebi.ac.uk/pdbe-srv/pdbechem/chemicalCompound/show/{0}'.format(
code)
r = requests.get(url_string)
# Raise error if content not obtained.
if not r.ok:
raise IOError("Could not get to url {0}".format(url_string))
# Parse r.text in an ugly way to get the required information.
description = r.text.split('<h3>Molecule name')[1].split('</tr>')[0]
description = description.strip().split('\n')[3].strip()[:255]
modified = r.text.split("<h3>Standard parent ")[1].split('</tr>')[0]
modified = modified.replace(" ", "").replace(
'\n', '').split('<')[-3].split('>')[-1]
if modified == "NotAssigned":
modified = None
# Add the required information to a dictionary which can then be passed to add_amino_acid_to_json.
aa_dict = {'code': code, 'description': description,
'modified': modified, 'letter': letter}
return aa_dict
def add_amino_acid_to_json(code, description, letter='X', modified=None, force_add=False):
""" Add an amino acid to the amino_acids.json file used to populate the amino_acid table.
Parameters
----------
code : str
New code to be added to amino acid table.
description : str
Description of the amino acid, e.g. 'amidated terminal carboxy group'.
letter : str, optional
One letter code for the amino acid.
Defaults to 'X'
modified : str or None, optional
Code of modified amino acid, e.g. 'ALA', or None.
Defaults to None
force_add : bool, optional
If True, will over-write existing dictionary value for code if already in amino_acids.json.
If False, then an IOError is raised if code is already in amino_acids.json.
Raises
------
IOError
If code is already in amino_acids.json and force_add is False.
Returns
-------
None
"""
# If code is already in the dictionary, raise an error
if (not force_add) and code in AMINO_ACIDS_DATA.keys():
raise IOError("{0} is already in the amino_acids dictionary, with values: {1}".format(
code, AMINO_ACIDS_DATA[code]))
# Prepare data to be added.
add_code = code
add_code_dict = {'description': description,
'letter': letter, 'modified': modified}
    # Add the new entry to the dictionary (existence was already checked above).
AMINO_ACIDS_DATA[add_code] = add_code_dict
# Write over json file with updated dictionary.
with open(_amino_acids_json_path, 'w') as foo:
foo.write(json.dumps(AMINO_ACIDS_DATA))
return
__author__ = 'Jack W. Heal' | AMPAL | /AMPAL-1.4.0.tar.gz/AMPAL-1.4.0/src/ampal/amino_acids.py | amino_acids.py |
# AMPcombi : AntiMicrobial Peptides parsing and functional classification tool
<img src="https://raw.githubusercontent.com/Darcy220606/AMPcombi/main/docs/amp-combi-logo.png" width="620" height="200" />
This tool parses the results of antimicrobial peptide (AMP) prediction tools into a single table and aligns the hits against a reference AMP database for functional classifications.
For parsing: AMPcombi is developed to parse the output of these **AMP prediction tools**:
| Tool | Version | Link |
| ------------- | ------------- | ------------- |
| Ampir | 1.1.0 | https://github.com/Legana/ampir |
| AMPlify | 1.0.3 | https://github.com/bcgsc/AMPlify |
| Macrel | 1.1.0 | https://github.com/BigDataBiology/macrel |
| HMMsearch | 3.3.2 | https://github.com/EddyRivasLab/hmmer |
| EnsembleAMPpred | - | https://pubmed.ncbi.nlm.nih.gov/33494403/ |
| NeuBI | - | https://github.com/nafizh/NeuBI |
For classification: AMPcombi is developed to offer functional annotation of the detected AMPs by alignment to an **AMP reference databases**, for e.g.,:
| Tool | Version | Link |
| ------------- | ------------- | ------------- |
| DRAMP | 3.0 | https://github.com/CPU-DRAMP/DRAMP-3.0 |
Alignment to the reference database is done using [diamond blastp v.2.0.15](https://www.nature.com/articles/s41592-021-01101-x)
======================
## Installation
======================
To install AMPcombi:
AMPcombi depends on `python` > 3.0, `biopython`, `pandas` and `diamond`.
Installation can be done using:
- pip installation
```
pip install AMPcombi
```
- git repository
```
git clone https://github.com/Darcy220606/AMPcombi.git
```
- conda
```
conda env create -f ampcombi/environment.yml
```
or
```
conda install -c bioconda AMPcombi
```
======================
## Usage:
======================
There are two basic commands to run AMPcombi:
1. Using `--amp_results`
```console
ampcombi \
--amp_results path/to/my/result_folder/ \
--faa path/to/sample_faa_files/
```
Here the head folder containing output files has to be given. AMPcombi finds and summarizes the output files from different tools, if the folder is structured and named as: `/result_folder/toolsubdir/samplesubdir/sample.tool.filetype`.
- Note that the filetype ending might vary and can be specified with `--tooldict`, if it is different from the default. When passing a dictionary via command line, this has to be done as a string with single quotes `' '` and the dictionary keys and items with double quotes `" "`, i.e. `'{"key1":"item1", "key2":"item2"}'` (see the example below these notes).
- Note that `--sample_list` can also be given if only specific samples are needed from the directory.
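For example, a hypothetical call that overrides two of the default file endings could look like this (the endings shown are purely illustrative):
```console
ampcombi \
--amp_results path/to/my/result_folder/ \
--faa path/to/sample_faa_files/ \
--tooldict '{"ampir":"ampir.out.tsv", "macrel":"macrel.prediction.tsv"}'
```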
The path to the folder containing the respective protein fasta files has to be provided with `--faa`. The files have to be named with `<samplename>.faa`.
Structure of the results folder:
```console
amp_results/
├── tool_1/
| ├── sample_1/
| | └── sample_1.tool_1.tsv
| └── sample_2/
| | └── sample_2.tool_1.tsv
├── tool_2/
| ├── sample_1/
| | └── sample_1.tool_2.txt
| └── sample_2/
| | └── sample_2.tool_2.txt
├── tool_3/
├── sample_1/
| └── sample_1.tool_3.predict
└── sample_2/
└── sample_2.tool_3.predict
```
2. Using `--path_list` and `--sample_list`
```console
ampcombi \
--path_list path_to_sample_1_tool_1.csv path_to_sample_1_tool_2.csv \
--path_list path_to_sample_2_tool_1.csv path_to_sample_2_tool_2.csv \
--sample_list sample_1 sample_2 \
--faa path/to/sample_faa_files/
```
Here the paths to the output-files to be summarized can be given by `--path_list` for each sample. Together with this option a list of sample-names has to be supplied.
Either the path to the folder containing the respective protein fasta files has to be provided with `--faa` or, in case of only one sample, the path to the corresponding `.faa` file. The files have to be named with `<samplename>.faa`.
### Input options:
| command | definition | default | example |
| ------------- | ------------- | ------------- | ------------- |
| --amp_results | path to the folder containing different tool's output files | ./test_files/ | ../amp_results/ |
| --sample_list | list of samples' names | - | sample_1 sample_2 |
| --path_list | list of paths to output files | - | path_to_sample_1_tool_1.csv path_to_sample_1_tool_2.csv |
| --cutoff | probability cutoff to filter AMPs | 0 | 0.5 |
| --faa | path to the folder containing the samples' `.faa` files or, in case of only one sample, the path to the corresponding `.faa` file. Filenames have to contain the corresponding sample-name, i.e. sample_1.faa | ./test_faa/ | ./faa_files/|
| --tooldict | dictionary of AMP-tools and their respective output file endings | '{"ampir":"ampir.tsv", "amplify":"amplify.tsv", "macrel":"macrel.tsv", "neubi":"neubi.fasta", "hmmer_hmmsearch":"hmmsearch.txt", "ensembleamppred":"ensembleamppred.txt"}' | - |
| --amp_database | path to the folder containing the reference database files: (1) a fasta file with <.fasta> file extension and (2) the corresponding table with functional and taxonomic classifications in <.tsv> file extension | [DRAMP 'general amps'](http://dramp.cpu-bioinfor.org/downloads/) database | ./amp_ref_database/ |
| --complete_summary | concatenates all samples' summarized tables into one and generates both 'csv' and interactive 'html' files | False | True |
| --log | print messages into log file instead of stdout | False | True |
| --threads | adjust the number of threads required for DIAMOND alignment depending on the computing resources available | 4 | 32 |
| --version | print the version number into stdout | - | 0.1.4 |
- Note: The fasta file corresponding to the AMP database should not contain any characters other than ['A','C','D','E','F','G','H','I','K','L','M','N','P','Q','R','S','T','V','W','Y']
- Note: The reference database table should be tab delimited.
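As a minimal sketch (sample names and paths are illustrative), a run that also writes the concatenated summary files could look like:
```console
ampcombi \
--amp_results ./amp_results/ \
--faa ./faa_files/ \
--sample_list sample_1 sample_2 \
--complete_summary True
```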
### Output:
The output will be written into your working directory, containing the following files and folders:
```console
<pwd>/
├── amp_ref_database/
| ├── amp_ref.dmnd
| ├── general_amps_<DATE>_clean.fasta
| └── general_amps_<DATE>.tsv
├── sample_1/
| ├── sample_1_amp.faa
| ├── sample_1_ampcombi.csv
| └── sample_1_diamond_matches.txt
├── sample_2/
| ├── sample_2_amp.faa
| ├── sample_2_ampcombi.csv
| └── sample_2_diamond_matches.txt
├── AMPcombi_summary.csv
├── AMPcombi_summary.html
└── ampcombi.log
```
======================
## Contribution:
======================
AMPcombi is a tool developed for parsing results from published AMP prediction tools. We therefore welcome fellow contributors who would like to add new AMP prediction tools results for parsing and alignment.
### Adding a new tool to AMPcombi
In `ampcombi/reformat_tables.py`
- add a new tool function to read the output to a pandas dataframe and return two columns named `contig_id` and `prob_<toolname>`
- add the new function to the `read_path` function
In `ampcombi/main.py`
- add your default `tool:tool.fileending` to the default of `--tooldict`
======================
**Authors**: @louperelo and @darcy220606
| AMPcombi | /AMPcombi-0.1.7.tar.gz/AMPcombi-0.1.7/README.md | README.md |
# TITLE: Reformat the AMP output tables
import pandas as pd
from Bio import SeqIO
import os
#########################################
# FUNCTION: KEEP ONLY LINES WITH KEYWORD
#########################################
def trim_text(filepath, key):
lines = []
# read file
with open(filepath, 'r') as fp:
        # read and store all lines in a list
lines = fp.readlines()
# Write file
with open(filepath, 'w') as fp:
# iterate each line
for line in lines:
if key in line:
fp.write(line)
#########################################
# FUNCTION: CHECK DATAFRAME SHAPES MATCH
#########################################
def check_dfshape(df1, df2):
if (df1.shape[0] != df2.shape[0]):
print(f'ERROR: different row number in tool output and faa file. Ensembleamppred output could not be included in the summary')
return False
else:
return True
#########################################
# FUNCTIONS: READ TOOLS' OUTPUT TO DFs
#########################################
#########################################
# AMP_ampir
#########################################
def ampir(path, p):
# Dictionary to rename columns
ampir_dict = {'seq_name':'contig_id', 'seq_aa':'seq_aa', 'prob_AMP':'prob_ampir'}
# read file as df and rename columns
ampir_df = pd.read_csv(path, sep='\t').rename(columns=ampir_dict)
# cut contig_id to remove extra information added by tool
ampir_df['contig_id']=ampir_df['contig_id'].apply(lambda x: x.split()[0])
# apply probability cutoff
ampir_df = ampir_df[(ampir_df['prob_ampir']>=p)]
return ampir_df[['contig_id', 'prob_ampir']]
#########################################
# AMP_amplify
#########################################
def amplify(path, p):
amplify_dict = {'Sequence_ID':'contig_id', 'Sequence':'seq_aa', 'Length':'length', 'Charge':'charge', 'Probability_score':'prob_amplify', 'AMPlify_log_scaled_score':'log_score', 'Prediction':'prediction'}
amplify_df = pd.read_csv(path, sep='\t').rename(columns=amplify_dict).dropna()
# apply probability cutoff
amplify_df = amplify_df[(amplify_df['prob_amplify']>=p)]
return amplify_df[['contig_id', 'prob_amplify']]
#########################################
# AMP_ensembleamppred
#########################################
def amppred(path, p):
trim_text(path, 'Sequence')
    amppred_dict = {4:'index', 14:'prob_amppred'}
amppred_df = pd.read_csv(path, sep=' ', header=None).rename(columns=amppred_dict)
amppred_df = amppred_df[(amppred_df['prob_amppred']>=p)]
return amppred_df[['index', 'prob_amppred']]
#########################################
# AMP_macrel
#########################################
def macrel(path, p):
macrel_dict = {'Access':'contig_id', 'Sequence':'seq_aa', 'AMP_family':'amp_family', 'AMP_probability':'prob_macrel', 'Hemolytic':'hemolytic', 'Hemolytic_probability':'prob_hemo'}
#set header to second row to skip first line starting with #
macrel_df = pd.read_csv(path, sep='\t', header=[1]).rename(columns=macrel_dict)
# apply probability cutoff
macrel_df = macrel_df[(macrel_df['prob_macrel']>=p)]
return macrel_df[['contig_id', 'prob_macrel']]
#########################################
# AMP_neubi
#########################################
def neubi(path, p):
    neubi_seq = SeqIO.parse(open(path), 'fasta')
    #collect contig ids, aa-sequences and probabilities as rows
    #(p is the last value in the header, following the '|' symbol);
    #rows are gathered in a list because DataFrame.append was removed in pandas 2.0
    rows = []
    for contig in neubi_seq:
        contig_id, sequence, description = contig.id, str(contig.seq), float(contig.description.split("|",1)[1])
        rows.append({'contig_id':contig_id, 'aa_sequence':sequence, 'prob_neubi':description})
    neubi_df = pd.DataFrame(rows, columns=['contig_id', 'aa_sequence', 'prob_neubi'])
    # apply probability cutoff
    neubi_df = neubi_df[(neubi_df['prob_neubi']>=p)]
    return neubi_df[['contig_id', 'prob_neubi']]
#########################################
# AMP_hmmsearch
#########################################
def hmmsearch(path):
# list of words in header rows to be removed
key_words = ["# hmmsearch ::", "# HMMER ", "# Copyright (C) ", "# Freely distributed",
"# - - - ", "# query HMM file:", "# target sequence database:",
"# output directed to file:", "Query:", "Accession:",
"Description:", "Scores for complete sequences", "--- full sequence",
"# number of worker threads:", "inclusion threshold", "E-value", "-------"]
no_hits = "[No hits detected that satisfy reporting thresholds]"
hmmer_dict = {0:'evalue_hmmer', 1:'score_hmmer', 2:'bias', 3:'eval_domain', 4:'score_domain', 5:'bias_domain', 6:'exp_dom', 7:'N_dom', 8:'contig_id'}
# open the file and read line by line
with open(path, "r") as fp:
lines = fp.readlines()
# Open hmmer_tmp.txt file and only write lines not containing any of key_words
with open("hmmer_tmp.txt", "w") as fp:
for line in lines:
if not any(phrase in line for phrase in key_words):
fp.write(line)
with open('hmmer_tmp.txt') as tmp:
if no_hits in tmp.read():
print('The hmmersearch-file did not contain any hits')
hmmer_df = pd.DataFrame(columns=[val for val in hmmer_dict.values()])
else:
hmmer_df = pd.read_table("hmmer_tmp.txt", delim_whitespace=True, header=None).reset_index().rename(columns=hmmer_dict).drop(columns = [9,10,11,12,13,14,15,16]).dropna()
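            # Rows after the per-sequence hits form the hmmsearch footer;
            # find the first row containing '#' and drop it and everything
            # below it.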
for index, row in hmmer_df.iterrows():
#identify the footer part of the file: index of first row with '#'
if (row.str.contains('#').any()):
i = index
break
# eliminate all rows with footer information
hmmer_df = hmmer_df[hmmer_df.index<i]
#remove the temporary file
os.remove('hmmer_tmp.txt')
return hmmer_df[['contig_id', 'evalue_hmmer']]
#########################################
# FUNCTION: READ DFs PER SAMPLE
#########################################
# For one sample: parse filepaths and read files to dataframes, create list of dataframes
def read_path(df_list, file_list, p, dict, faa_path, samplename):
for path in file_list:
if(path.endswith(dict['ampir'])):
print('found ampir file')
df_list.append(ampir(path, p))
elif(path.endswith(dict['amplify'])):
print('found amplify file')
df_list.append(amplify(path, p))
elif(path.endswith(dict['macrel'])):
print('found macrel file')
df_list.append(macrel(path, p))
elif(path.endswith(dict['neubi'])):
print('found neubi file')
df_list.append(neubi(path, p))
elif(path.endswith(dict['hmmer_hmmsearch'])):
print('found hmmersearch file')
df_list.append(hmmsearch(path))
elif(path.endswith(dict['ensembleamppred'])):
print('found ensemblamppred file')
faa_filepath = faa_path+samplename+'.faa'
faa_df = faa2table(faa_filepath)
amppred_df = amppred(path, p)
if(check_dfshape(amppred_df, faa_df)):
# add contig_ids via index numbers, because ensembleamppred only gives numbered sequences without ids, in the order of sequences in faa
amppred_df = pd.merge(amppred_df, faa_df.reset_index(), on='index')
                amppred_df = amppred_df.drop(['index', 'aa_sequence'], axis=1)
df_list.append(amppred_df)
else:
print(f'No AMP-output-files could be found with the given path ({path}). \n Please check your file paths and file endings or use the <--path-list> command')
break
#########################################
# FUNCTION: MERGE DATAFRAMES
#########################################
# merge dataframes from list to summary output per sample
def summary(df_list, samplename, faa_path):
#initiate merge_df
merge_df = pd.DataFrame(columns=['contig_id'])
#merge all dfs in the df-list on contig_id
for df in df_list:
merge_df = pd.merge(merge_df, pd.DataFrame(df) , how='outer', on='contig_id')
#replace all NAs (where a tool did not identify the contig as AMP) with 0
merge_df = merge_df.fillna(0)
#add amino-acid sequences
faa_df = faa2table(faa_path)
merge_df = merge_df.merge(faa_df, how='inner', on='contig_id')
    # sort by the sum of probabilities over rows (numeric columns only, so
    # the aa_sequence column is ignored)
    merge_df = merge_df.set_index('contig_id')
    merge_df['p_sum'] = merge_df.sum(axis=1, numeric_only=True)
    merge_df = merge_df.sort_values('p_sum', ascending=False).drop('p_sum', axis=1).reset_index()
return merge_df
#########################################
# FUNCTION: READ FAA TO TABLE
#########################################
# transform faa to dataframe with two columns
def faa2table(faa_path):
    #read the amino-acid fasta with SeqIO
    faa_seq = SeqIO.parse(open(faa_path), 'fasta')
    #collect contig ids and aa-sequences as rows
    #(a list of dicts, since DataFrame.append was removed in pandas 2.0)
    rows = []
    for contig in faa_seq:
        contig_id, sequence = contig.id, str(contig.seq)
        rows.append({'contig_id':contig_id, 'aa_sequence':sequence})
    fasta_df = pd.DataFrame(rows, columns=['contig_id', 'aa_sequence'])
return fasta_df | AMPcombi | /AMPcombi-0.1.7.tar.gz/AMPcombi-0.1.7/ampcombi/reformat_tables.py | reformat_tables.py |
import os
import sys
import pathlib
from amp_database import download_DRAMP
def check_samplelist(samplelist, tools, path):
if(samplelist==[]):
print('<--sample-list> was not given, sample names will be inferred from directory names')
for dirpath, subdirs, files in os.walk(path):
for dir in subdirs:
if (dir not in tools):
samplelist.append(dir)
return list(set(samplelist))
else:
return samplelist
def check_pathlist(filepaths, samplelist, fileending, path):
if(filepaths==[]):
print('<--path-list> was not given, paths to AMP-results-files will be inferred')
for sample in samplelist:
pathlist = []
for dirpath, subdirs, files in os.walk(path):
for file in files:
if ((sample in dirpath) and ((list(filter(file.endswith, fileending))!=[]))):
pathlist.append(dirpath+'/'+file)
filepaths.append(pathlist)
return filepaths
else:
return filepaths
def check_faa_path(faa_path, samplename):
if(os.path.isdir(faa_path)):
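        # Recursively collect .faa files whose filenames contain the sample
        # name; exactly one match is expected.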
path_list = list(pathlib.Path(faa_path).rglob(f"*{samplename}*.faa"))
if (len(path_list)>1):
sys.exit(f'AMPcombi interrupted: There is more than one .faa file for {samplename} in the folder given with --faa_path')
elif(not path_list):
sys.exit(f'AMPcombi interrupted: There is no .faa file containing {samplename} in the folder given with --faa_path')
return path_list[0]
elif(os.path.isfile(faa_path)):
return faa_path
else:
sys.exit(f'AMPcombi interrupted: The input given with --faa_path does not seem to be a valid directory or file. Please check.')
def check_ref_database(database):
if((database==None) and (not os.path.exists('amp_ref_database'))):
print('<--AMP_database> was not given, the current DRAMP general-AMP database will be downloaded and used')
database = 'amp_ref_database'
os.makedirs(database, exist_ok=True)
db = database
download_DRAMP(db)
return db
elif ((not database==None)):
if (os.path.exists(database)):
db = database
            print(f'<--AMP_database> = {db} is found and will be used')
return db
if (not os.path.exists(database)):
sys.exit(f'Reference amp database path {database} does not exist, please check the path.')
elif((database==None) and (os.path.exists('amp_ref_database'))):
print('<--AMP_database> = DRAMP is already downloaded and will be reused')
database = 'amp_ref_database'
db = database
return db
def check_path(path):
return os.path.exists(path) #returns True or False
def check_directory_tree(path, tools, samplelist):
print(f'Checking directory tree {path} for sub-directories \n ')
# get first level of sub-directories, check if at least one is named by a tool-name
subdirs_1 = [x for x in os.listdir(path) if x in tools]
if (not subdirs_1):
sys.exit(f'AMPcombi interrupted: First level sub-directories in {path} are not named by tool-names. Please check the directories names and the keys given in "--tooldict". \n ')
else:
print('First level sub-directories passed check.')
# get second level of sub-directories, check if at least one is named by a sample-name
subdirs_2 = []
for dir in subdirs_1:
subdirs = [x for x in os.listdir(path+dir) if x in samplelist]
if (subdirs):
subdirs_2.append(subdirs)
if (not subdirs_2):
sys.exit(f'AMPcombi interrupted: Second level sub-directories in {path} are not named by sample-names. Please check the directories names and the names given as "--sample_list" \n ')
else:
print('Second level sub-directories passed check')
print('Finished directory check')
def check_input_complete(path, samplelist, filepaths, tools):
# 1. Head folder does not exist and filepaths-list was not given
if((not check_path(path)) and (not filepaths)):
sys.exit('AMPcombi interrupted: Please provide the correct path to either the folder containing all amp files to be summarized (--amp_results) or the list of paths to the files (--path_list)')
# 2. Head folder does not exist, filepaths-list was given but no samplelist
elif((not check_path(path)) and (filepaths) and (not samplelist)):
sys.exit('AMPcombi interrupted: Please provide a list of sample-names (--sample_list) in addition to --path_list')
    # 3. Head folder does not exist, filepaths- and samplelist are given:
    elif((not check_path(path)) and (filepaths) and (samplelist)):
        # filepaths is a list of per-sample path-lists (one list per sample)
        for file in [f for sample_files in filepaths for f in sample_files]:
            # 3.1. check if paths in filepath-list exist
            if(not check_path(file)):
                sys.exit(f'AMPcombi interrupted: The path {file} does not exist. Please check the --path_list input.')
            # 3.2. check if paths contain sample-names from samplelist
            if(not any(n in file for n in samplelist)):
                sys.exit(f'AMPcombi interrupted: The path {file} does not contain any of the sample-names given in --sample_list')
    # 4. Head folder is given but no sample-list: check the directory tree structure
    elif((check_path(path)) and (not samplelist)):
check_directory_tree(path, tools, samplelist) | AMPcombi | /AMPcombi-0.1.7.tar.gz/AMPcombi-0.1.7/ampcombi/check_input.py | check_input.py |
import os
import argparse
import warnings
from contextlib import redirect_stdout
from version import __version__
import json
import os.path
# import functions from sub-scripts to main:
from reformat_tables import *
from amp_fasta import *
from check_input import *
from amp_database import *
from print_header import *
from visualise_complete_summary import *
# Define input arguments:
parser = argparse.ArgumentParser(prog = 'ampcombi', formatter_class=argparse.RawDescriptionHelpFormatter,
description=('''\
.............................................................................
*AMP-combi*
.............................................................................
This tool parses the results of amp prediction tools
and aligns the hits against reference databases for functional classification.
For detailed usage documentation please refer to <github_repo>
.............................................................................'''),
epilog='''Thank you for running AMP-combi!''',
add_help=True)
parser.add_argument("--amp_results", dest="amp", nargs='?', help="Enter the path to the folder that contains the different tool's output files in sub-folders named by sample name. \n If paths are to be inferred, sub-folders in this results-directory have to be organized like '/amp_results/toolsubdir/samplesubdir/tool.sample.filetype' \n (default: %(default)s)",
type=str, default='./test_files/')
parser.add_argument("--sample_list", dest="samples", nargs='*', help="Enter a list of sample-names, e.g. sample_1 sample_2 sample_n. \n If not given, the sample-names will be inferred from the folder structure",
default=[])
parser.add_argument("--path_list", dest="files", nargs='*', action='append', help="Enter the list of paths to the files to be summarized as a list of lists, e.g. --path_list path/to/my/sample1.ampir.tsv path/to/my/sample1.amplify.tsv --path_list path/to/my/sample2.ampir.ts path/to/my/sample2.amplify.tsv. \n If not given, the file-paths will be inferred from the folder structure",
default=[])
parser.add_argument("--cutoff", dest="p", help="Enter the probability cutoff for AMPs \n (default: %(default)s)",
type=int, default=0)
parser.add_argument("--faa", dest="faa", help="Enter the path to the folder containing the reference .faa files or to one .faa file (running only one sample). Filenames have to contain the corresponding sample-name, i.e. sample_1.faa \n (default: %(default)s)",
type=str, default='./test_faa/')
parser.add_argument("--tooldict", dest="tools", help="Enter a dictionary of the AMP-tools used with their output file endings (as they appear in the directory tree), \n Tool-names have to be written as in default:\n default={'ampir':'ampir.tsv', 'amplify':'amplify.tsv', 'macrel':'macrel.tsv', 'hmmer_hmmsearch':'hmmsearch.txt', 'ensembleamppred':'ensembleamppred.txt'}",
type=str, default='{"ampir":"ampir.tsv", "amplify":"amplify.tsv", "macrel":"macrel.tsv", "neubi":"neubi.fasta", "hmmer_hmmsearch":"hmmsearch.txt", "ensembleamppred":"ensembleamppred.txt"}')
parser.add_argument("--amp_database", dest="ref_db", nargs='?', help="Enter the path to the folder containing the reference database files (.fa and .tsv); a fasta file and the corresponding table with functional and taxonomic classifications. \n (default: DRAMP database)",
type=str, default=None)
parser.add_argument("--complete_summary", dest="complete", nargs='?', help="Concatenates all sample summaries to one final summary and outputs both csv and interactive html files",
type=bool, default=False)
parser.add_argument("--log", dest="log_file", nargs='?', help="Silences the standard output and captures it in a log file)",
type=bool, default=False)
parser.add_argument("--threads", dest="cores", nargs='?', help="Changes the threads used for DIAMOND alignment (default: %(default)s)",
type=bool, default='4')
parser.add_argument('--version', action='version', version='%(prog)s ' + __version__)
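# A minimal example invocation (sample and folder names below are
# illustrative assumptions, not shipped test data):
#
#   ampcombi --amp_results ./test_files/ --faa ./test_faa/ \
#            --sample_list sample_1 sample_2 --cutoff 0.5 --complete_summary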
# get command line arguments
args = parser.parse_args()
# assign input arguments to variables
path = args.amp
samplelist_in = args.samples
filepaths_in = args.files
p = args.p
faa_path = args.faa
tooldict = json.loads(args.tools)
database = args.ref_db
complete_summary = args.complete
threads = args.cores
# additional variables
# extract list of tools from input dictionary. If not given, default dict contains all possible tools
tools = [key for key in tooldict]
# extract list of tool-output file-endings. If not given, default dict contains default endings.
fileending = [val for val in tooldict.values()]
# suppress pandas warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
#########################################
# MAIN FUNCTION
#########################################
def main_workflow():
# print AMPcombi header
print_header()
# check input sample-list and create sample-list if input empty
samplelist = check_samplelist(samplelist_in, tools, path)
# check input parameters
check_input_complete(path, samplelist, filepaths_in, tools)
# check input filepaths and create list of list of filepaths per sample if input empty
filepaths = check_pathlist(filepaths_in, samplelist, fileending, path)
# check amp_ref_database filepaths and create a directory if input empty
db = check_ref_database(database)
# initiate a final_summary dataframe to concatenate each new sample-summary
if (complete_summary):
complete_summary_df = pd.DataFrame([])
# generate summary for each sample
amp_faa_paths = []
create_diamond_ref_db(db,threads)
for i in range(0, len(samplelist)):
main_list = []
print('\n ########################################################## ')
print(f'Processing AMP-files from sample: {samplelist[i]}')
os.makedirs(samplelist[i], exist_ok=True)
# fill main_list with tool-output filepaths for sample i
read_path(main_list, filepaths[i], p, tooldict, faa_path, samplelist[i])
# get the path to the samples' corresponding faa file
faa_name = check_faa_path(faa_path, samplelist[i])
# use main_list to create the summary file for sample i
summary_df = summary(main_list, samplelist[i], faa_name)
# Generate the AMP-faa.fasta for sample i
out_path = samplelist[i] +'/'+samplelist[i]+'_amp.faa'
amp_fasta(summary_df, faa_name, out_path)
amp_faa_paths.append(out_path)
print(f'The fasta containing AMP sequences for {samplelist[i]} was saved to {samplelist[i]}/ \n')
amp_matches = samplelist[i] +'/'+samplelist[i]+'_diamond_matches.txt'
        print(f'The diamond alignment for {samplelist[i]} is in progress ...')
diamond_df = diamond_alignment(db, amp_faa_paths, amp_matches, threads)
print(f'The diamond alignment for {samplelist[i]} was saved to {samplelist[i]}/.')
# Merge summary_df and diamond_df
sample_summary_df = pd.merge(summary_df, diamond_df, on = 'contig_id', how='left')
# Insert column with sample name on position 0
sample_summary_df.insert(0, 'name', samplelist[i])
# Write sample summary into sample output folder
sample_summary_df.to_csv(samplelist[i] +'/'+samplelist[i]+'_ampcombi.csv', sep=',', index=False)
print(f'The summary file for {samplelist[i]} was saved to {samplelist[i]}/.')
if (complete_summary):
# concatenate the sample summary to the complete summary and overwrite it
complete_summary_df = pd.concat([complete_summary_df, sample_summary_df])
complete_summary_df.to_csv('AMPcombi_summary.csv', sep=',', index=False)
html_generator()
else:
continue
if (complete_summary):
print(f'\n FINISHED: The AMPcombi_summary.csv and AMPcombi_summary.html files were saved to your current working directory.')
else:
print(f'\n FINISHED: AMPcombi created summaries for all input samples.')
def main():
    if args.log_file:
        # append to an existing log, otherwise start a fresh one
        mode = 'a' if os.path.exists('ampcombi.log') else 'w'
        with open('ampcombi.log', mode) as f:
            with redirect_stdout(f):
                main_workflow()
    else:
        main_workflow()
if __name__ == "__main__":
main() | AMPcombi | /AMPcombi-0.1.7.tar.gz/AMPcombi-0.1.7/ampcombi/ampcombi.py | ampcombi.py |
# TITLE: Download the DRAMP database if input db empty AND and make database compatible for diamond
import pandas as pd
import requests
import os
from datetime import datetime
import subprocess
from Bio import SeqIO
import tempfile
import shutil
########################################
# FUNCTION: DOWNLOAD DRAMP DATABASE AND CLEAN IT
#########################################
def download_DRAMP(db):
##Download the (table) file and store it in a results directory
url = 'http://dramp.cpu-bioinfor.org/downloads/download.php?filename=download_data/DRAMP3.0_new/general_amps.xlsx'
r = requests.get(url, allow_redirects=True)
with open(db +'/'+ 'general_amps.xlsx', 'wb') as f:
f.write(r.content)
    ##Convert the excel sheet to a tab-separated file and write it to the database directory, stamped with the download date
    date = datetime.now().strftime("%Y_%m_%d")
    ref_amps = pd.read_excel(db + '/' + 'general_amps.xlsx')
    ref_amps.to_csv(db + '/' + f'general_amps_{date}.tsv', index=None, header=True, sep='\t')
##Download the (fasta) file and store it in a results directory
urlfasta = 'http://dramp.cpu-bioinfor.org/downloads/download.php?filename=download_data/DRAMP3.0_new/general_amps.fasta'
z = requests.get(urlfasta)
    fasta_path = os.path.join(db, f'general_amps_{date}.fasta')
with open(fasta_path, 'wb') as f:
f.write(z.content)
    ##Cleaning step to remove ambiguous amino acids from sequences in the database (e.g. zeros and brackets)
new_fasta = db + '/' + f'general_amps_{date}_clean.fasta'
seq_record = SeqIO.parse(open(fasta_path), "fasta")
with open(new_fasta, 'w') as f:
for record in seq_record:
id, sequence = record.id, str(record.seq)
            letters = ['A','C','D','E','F','G','H','I','K','L','M','N','P','Q','R','S','T','V','W','Y']
new = ''.join(i for i in sequence if i in letters)
f.write('>' + id + '\n' + new + '\n')
return os.remove(fasta_path), os.remove(db +'/'+ r'general_amps.xlsx')
########################################
# FUNCTION: CREATE DIAMOND COMPATIBLE DATABASE FORMATS
#########################################
def create_diamond_ref_db(db,threads):
cwd = os.getcwd()
for file in os.listdir(db):
if file.endswith('.fasta'):
            path = os.path.join(os.path.abspath(db), file)
os.chdir(db)
#process = subprocess.Popen([f'{scripts_path}/diamond_makedb.sh', path])
subprocess.run('diamond_makedb.sh', text=True, input=f'{path}\n{threads}')
os.chdir(cwd)
return path
########################################
# FUNCTION: DIAMOND ALIGNMENT
#########################################
def diamond_alignment(db, amp_faa_paths, amp_matches,threads):
#create temp folder and delete at the end
cwd = os.getcwd()
for path in amp_faa_paths:
# align the query with the database
temp = tempfile.mkdtemp()
subprocess.run('diamond_alignment.sh', text=True, input=f'{path}\n{temp}\n{db}\n{threads}')
shutil.move(temp+'/diamond_matches.tsv', amp_matches)
shutil.rmtree(temp)
        # merge the diamond alignment with the ref_db table
dd_align = pd.read_csv(amp_matches, delimiter='\t')
dd_align = dd_align[['target_id','contig_id','pident','evalue']]
for file in os.listdir(db):
if file.endswith('.tsv'):
                path_2 = os.path.join(os.path.abspath(db), file)
ref_db = pd.read_csv(path_2, delimiter='\t')
ref_db.columns.values[0] = "target_id"
merged = pd.merge(dd_align, ref_db, on='target_id',how='inner')
return merged | AMPcombi | /AMPcombi-0.1.7.tar.gz/AMPcombi-0.1.7/ampcombi/amp_database.py | amp_database.py |
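# A minimal sketch of how the three helpers above chain together (paths are
# illustrative; assumes the diamond_makedb.sh/diamond_alignment.sh wrappers
# are available on PATH):
#
#   db_dir = './amp_ref_database'
#   download_DRAMP(db_dir)                                   # fetch + clean DRAMP
#   create_diamond_ref_db(db_dir, 4)                         # build the DIAMOND db
#   hits = diamond_alignment(db_dir, ['sample_1/sample_1_amp.faa'],
#                            'sample_1/sample_1_diamond_matches.txt', 4)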
import pika as p
import logging
import functools
class AMQPEz:
def __init__(self, configuration):
self.config_ = configuration
self.logger = self.initialize_logger_()
self.consuming = False
self.draining = False
def start(self):
self.connection_ = self.create_connection_()
self.connection_.ioloop.start()
def stop(self):
self.logger.info("Stopping...")
if not self.draining:
self.draining = True
if self.consuming:
self.stop_consuming_()
self.connection_.ioloop.start()
else:
self.connection_.ioloop.stop()
self.logger.info("Stopped")
def create_connection_(self):
return p.SelectConnection(
parameters = self.config_.pikaparams,
on_open_callback=self.on_connection_opened_,
on_open_error_callback=self.on_connection_open_error_,
on_close_callback=self.on_connection_closed_
)
def on_connection_opened_(self, connection):
self.logger.info("Connection opened")
        connection.channel(on_open_callback=self.on_channel_opened_)
def on_connection_open_error_(self, connection, error):
self.logger.error("Connection opening errored with the following %s"%error)
def on_connection_closed_(self, connection, reason):
if self.draining:
self.connection_.ioloop.stop()
self.logger.info("Connection closed expectedly")
else:
self.logger.info("Connection closed with the following reason: %s"%reason)
    def on_channel_opened_(self, channel):
self.logger.info("Opened channel")
# register exit callback
self.channel_ = channel
self.channel_.add_on_close_callback(self.on_channel_closed_)
self.assert_topology_()
def on_channel_closed_(self, channel, reason):
self.logger.info("Channel closed because of %s"%reason)
self.consuming = False
if self.connection_.is_closing or self.connection_.is_closed:
            self.logger.warning("Tried to close an already closing connection")
else:
self.logger.info("Closing connection")
self.connection_.close()
def assert_topology_(self):
if self.config_.input_queue and self.config_.exchange:
self.channel_.exchange_declare(
exchange=self.config_.exchange,
exchange_type=self.config_.exchange_type,
callback = lambda _:
self.channel_.queue_declare(queue=self.config_.input_queue, callback=self.on_topology_asserted_)
)
else:
self.channel_.queue_declare(queue=self.config_.input_queue, callback=self.on_topology_asserted_)
def on_topology_asserted_(self, frame):
self.logger.info("Topology asserted")
#self.channel_.confirm_delivery(self.on_message_published) # for publisher confirms
self.channel_.basic_qos(prefetch_count=self.config_.prefetch_count,
callback=lambda _:self.start_consuming_())
def on_message_published(self, info):
        ## in the future it would be nice to support publisher confirms here
pass
def start_consuming_(self):
self.logger.info("Starting consumption")
self.channel_.add_on_cancel_callback(self.on_consumer_cancelled_)
self.consumer_tag_ = self.channel_.basic_consume(
self.config_.input_queue, self.handle_message_
)
self.consuming = True
def stop_consuming_(self):
if self.channel_: self.channel_.basic_cancel(self.consumer_tag_, self.on_cancelled)
def on_cancelled(self, frame):
self.consuming = False
self.channel_.close()
self.logger.info("Closed channel")
def handle_message_(self, channel, deliver, props, message):
deserialized = self.config_.deserializer(message)
self.logger.info("Got message %s"%deserialized)
result = self.config_.task(deserialized)
if not result: self.channel_.basic_nack(deliver.delivery_tag); return
serialized = self.config_.serializer(result)
self.logger.info("Produced message %s"%result)
if self.config_.exchange: # produce output
self.channel_.basic_publish(self.config_.exchange, self.config_.routing_key, serialized)
            self.channel_.basic_ack(deliver.delivery_tag) # remove this ack in case of publisher confirms
else:
self.channel_.basic_ack(deliver.delivery_tag)
def on_consumer_cancelled_(self, frame):
self.logger.info("Consumer cancelled")
if self.channel_: self.channel_.close()
def initialize_logger_(self):
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter(
"[%(asctime)s] %(levelname)s in %(module)s: %(message)s")
ch.setFormatter(formatter)
logger.addHandler(ch)
return logger | AMQPEz | /AMQPEz-1.0.0-py3-none-any.whl/amqpez/amqpez.py | amqpez.py |
from . import AMQPEzConfig
import pika as p
class AMQPEzConfigBuilder:
def __init__(self):
self.target_config = AMQPEzConfig()
self.connection_set = None
def add_connection_params(self, host, port = 5672, vhost = '/'):
if self.connection_set: raise ValueError("Cannot set connection parameters twice")
self.connection_set = 'default'
self.target_config.host = host
self.target_config.port = port
self.target_config.vhost = vhost
return self
def add_connection_url(self, url):
if self.connection_set: raise ValueError("Cannot set connection parameters twice")
self.connection_set = 'url'
        self.target_config.pikaparams = p.URLParameters(url)
return self
def add_basic_credentials(self, user, password):
self.target_config.credentials = p.PlainCredentials(user, password)
return self
def add_external_credentials(self, external_creds):
self.target_config.credentials = p.ExternalCredentials(external_creds)
return self
def add_exchange(self, exchange, exchange_type = 'direct', routing_key = ''):
self.target_config.exchange = exchange
self.target_config.exchange_type = exchange_type
self.target_config.routing_key = routing_key
return self
def add_queue(self, queue):
self.target_config.input_queue = queue
return self
def add_serializers(self, serializer, deserializer):
self.target_config.serializer = serializer
self.target_config.deserializer = deserializer
return self
def add_deserializer(self, deserializer):
self.target_config.deserializer = deserializer
return self
def add_task(self, task):
self.target_config.task = task
return self
def add_qos(self, prefetch_count):
self.target_config.prefetch_count = prefetch_count
return self
def build(self):
if self.connection_set == 'default':
self.target_config.pikaparams = p.ConnectionParameters(
host = self.target_config.host,
virtual_host = self.target_config.vhost,
port = self.target_config.port,
credentials = self.target_config.credentials
)
return self.target_config | AMQPEz | /AMQPEz-1.0.0-py3-none-any.whl/amqpez/configbuilder.py | configbuilder.py |
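# A minimal end-to-end sketch combining the builder above with the AMQPEz
# client (broker address, queue/exchange names, the doubling task and the
# import path are illustrative assumptions):
#
#   from amqpez import AMQPEz, AMQPEzConfigBuilder
#
#   config = (AMQPEzConfigBuilder()
#             .add_connection_params('localhost')
#             .add_basic_credentials('guest', 'guest')
#             .add_queue('in_queue')
#             .add_exchange('out_exchange', 'direct', 'out_key')
#             .add_serializers(lambda n: str(n).encode(), lambda b: int(b))
#             .add_task(lambda n: n * 2)
#             .add_qos(1)
#             .build())
#
#   AMQPEz(config).start()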
import sys
DEFAULT_VERSION = "0.6c9"
DEFAULT_URL = "http://pypi.python.org/packages/%s/s/setuptools/" % sys.version[:3]
md5_data = {
'setuptools-0.6b1-py2.3.egg': '8822caf901250d848b996b7f25c6e6ca',
'setuptools-0.6b1-py2.4.egg': 'b79a8a403e4502fbb85ee3f1941735cb',
'setuptools-0.6b2-py2.3.egg': '5657759d8a6d8fc44070a9d07272d99b',
'setuptools-0.6b2-py2.4.egg': '4996a8d169d2be661fa32a6e52e4f82a',
'setuptools-0.6b3-py2.3.egg': 'bb31c0fc7399a63579975cad9f5a0618',
'setuptools-0.6b3-py2.4.egg': '38a8c6b3d6ecd22247f179f7da669fac',
'setuptools-0.6b4-py2.3.egg': '62045a24ed4e1ebc77fe039aa4e6f7e5',
'setuptools-0.6b4-py2.4.egg': '4cb2a185d228dacffb2d17f103b3b1c4',
'setuptools-0.6c1-py2.3.egg': 'b3f2b5539d65cb7f74ad79127f1a908c',
'setuptools-0.6c1-py2.4.egg': 'b45adeda0667d2d2ffe14009364f2a4b',
'setuptools-0.6c2-py2.3.egg': 'f0064bf6aa2b7d0f3ba0b43f20817c27',
'setuptools-0.6c2-py2.4.egg': '616192eec35f47e8ea16cd6a122b7277',
'setuptools-0.6c3-py2.3.egg': 'f181fa125dfe85a259c9cd6f1d7b78fa',
'setuptools-0.6c3-py2.4.egg': 'e0ed74682c998bfb73bf803a50e7b71e',
'setuptools-0.6c3-py2.5.egg': 'abef16fdd61955514841c7c6bd98965e',
'setuptools-0.6c4-py2.3.egg': 'b0b9131acab32022bfac7f44c5d7971f',
'setuptools-0.6c4-py2.4.egg': '2a1f9656d4fbf3c97bf946c0a124e6e2',
'setuptools-0.6c4-py2.5.egg': '8f5a052e32cdb9c72bcf4b5526f28afc',
'setuptools-0.6c5-py2.3.egg': 'ee9fd80965da04f2f3e6b3576e9d8167',
'setuptools-0.6c5-py2.4.egg': 'afe2adf1c01701ee841761f5bcd8aa64',
'setuptools-0.6c5-py2.5.egg': 'a8d3f61494ccaa8714dfed37bccd3d5d',
'setuptools-0.6c6-py2.3.egg': '35686b78116a668847237b69d549ec20',
'setuptools-0.6c6-py2.4.egg': '3c56af57be3225019260a644430065ab',
'setuptools-0.6c6-py2.5.egg': 'b2f8a7520709a5b34f80946de5f02f53',
'setuptools-0.6c7-py2.3.egg': '209fdf9adc3a615e5115b725658e13e2',
'setuptools-0.6c7-py2.4.egg': '5a8f954807d46a0fb67cf1f26c55a82e',
'setuptools-0.6c7-py2.5.egg': '45d2ad28f9750e7434111fde831e8372',
'setuptools-0.6c8-py2.3.egg': '50759d29b349db8cfd807ba8303f1902',
'setuptools-0.6c8-py2.4.egg': 'cba38d74f7d483c06e9daa6070cce6de',
'setuptools-0.6c8-py2.5.egg': '1721747ee329dc150590a58b3e1ac95b',
'setuptools-0.6c9-py2.3.egg': 'a83c4020414807b496e4cfbe08507c03',
'setuptools-0.6c9-py2.4.egg': '260a2be2e5388d66bdaee06abec6342a',
'setuptools-0.6c9-py2.5.egg': 'fe67c3e5a17b12c0e7c541b7ea43a8e6',
'setuptools-0.6c9-py2.6.egg': 'ca37b1ff16fa2ede6e19383e7b59245a',
}
import sys, os
try: from hashlib import md5
except ImportError: from md5 import md5
def _validate_md5(egg_name, data):
if egg_name in md5_data:
digest = md5(data).hexdigest()
if digest != md5_data[egg_name]:
print >>sys.stderr, (
"md5 validation of %s failed! (Possible download problem?)"
% egg_name
)
sys.exit(2)
return data
def use_setuptools(
version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
download_delay=15
):
"""Automatically find/download setuptools and make it available on sys.path
`version` should be a valid setuptools version number that is available
as an egg for download under the `download_base` URL (which should end with
a '/'). `to_dir` is the directory where setuptools will be downloaded, if
it is not already available. If `download_delay` is specified, it should
be the number of seconds that will be paused before initiating a download,
should one be required. If an older version of setuptools is installed,
this routine will print a message to ``sys.stderr`` and raise SystemExit in
an attempt to abort the calling script.
"""
was_imported = 'pkg_resources' in sys.modules or 'setuptools' in sys.modules
def do_download():
egg = download_setuptools(version, download_base, to_dir, download_delay)
sys.path.insert(0, egg)
import setuptools; setuptools.bootstrap_install_from = egg
try:
import pkg_resources
except ImportError:
return do_download()
try:
pkg_resources.require("setuptools>="+version); return
except pkg_resources.VersionConflict, e:
if was_imported:
print >>sys.stderr, (
"The required version of setuptools (>=%s) is not available, and\n"
"can't be installed while this script is running. Please install\n"
" a more recent version first, using 'easy_install -U setuptools'."
"\n\n(Currently using %r)"
) % (version, e.args[0])
sys.exit(2)
else:
del pkg_resources, sys.modules['pkg_resources'] # reload ok
return do_download()
except pkg_resources.DistributionNotFound:
return do_download()
def download_setuptools(
version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
delay = 15
):
"""Download setuptools from a specified location and return its filename
`version` should be a valid setuptools version number that is available
as an egg for download under the `download_base` URL (which should end
with a '/'). `to_dir` is the directory where the egg will be downloaded.
`delay` is the number of seconds to pause before an actual download attempt.
"""
import urllib2, shutil
egg_name = "setuptools-%s-py%s.egg" % (version,sys.version[:3])
url = download_base + egg_name
saveto = os.path.join(to_dir, egg_name)
src = dst = None
if not os.path.exists(saveto): # Avoid repeated downloads
try:
from distutils import log
if delay:
log.warn("""
---------------------------------------------------------------------------
This script requires setuptools version %s to run (even to display
help). I will attempt to download it for you (from
%s), but
you may need to enable firewall access for this script first.
I will start the download in %d seconds.
(Note: if this machine does not have network access, please obtain the file
%s
and place it in this directory before rerunning this script.)
---------------------------------------------------------------------------""",
version, download_base, delay, url
); from time import sleep; sleep(delay)
log.warn("Downloading %s", url)
src = urllib2.urlopen(url)
# Read/write all in one block, so we don't create a corrupt file
# if the download is interrupted.
data = _validate_md5(egg_name, src.read())
dst = open(saveto,"wb"); dst.write(data)
finally:
if src: src.close()
if dst: dst.close()
return os.path.realpath(saveto)
def main(argv, version=DEFAULT_VERSION):
"""Install or upgrade setuptools and EasyInstall"""
try:
import setuptools
except ImportError:
egg = None
try:
egg = download_setuptools(version, delay=0)
sys.path.insert(0,egg)
from setuptools.command.easy_install import main
return main(list(argv)+[egg]) # we're done here
finally:
if egg and os.path.exists(egg):
os.unlink(egg)
else:
if setuptools.__version__ == '0.0.1':
print >>sys.stderr, (
"You have an obsolete version of setuptools installed. Please\n"
"remove it from your system entirely before rerunning this script."
)
sys.exit(2)
req = "setuptools>="+version
import pkg_resources
try:
pkg_resources.require(req)
except pkg_resources.VersionConflict:
try:
from setuptools.command.easy_install import main
except ImportError:
from easy_install import main
main(list(argv)+[download_setuptools(delay=0)])
sys.exit(0) # try to force an exit
else:
if argv:
from setuptools.command.easy_install import main
main(argv)
else:
print "Setuptools version",version,"or greater has been installed."
print '(Run "ez_setup.py -U setuptools" to reinstall or upgrade.)'
def update_md5(filenames):
"""Update our built-in md5 registry"""
import re
for name in filenames:
base = os.path.basename(name)
f = open(name,'rb')
md5_data[base] = md5(f.read()).hexdigest()
f.close()
data = [" %r: %r,\n" % it for it in md5_data.items()]
data.sort()
repl = "".join(data)
import inspect
srcfile = inspect.getsourcefile(sys.modules[__name__])
f = open(srcfile, 'rb'); src = f.read(); f.close()
match = re.search("\nmd5_data = {\n([^}]+)}", src)
if not match:
print >>sys.stderr, "Internal error!"
sys.exit(2)
src = src[:match.start(1)] + repl + src[match.end(1):]
f = open(srcfile,'w')
f.write(src)
f.close()
if __name__=='__main__':
if len(sys.argv)>2 and sys.argv[1]=='--md5update':
update_md5(sys.argv[2:])
else:
main(sys.argv[1:]) | AMQPQueue | /AMQPQueue-0.4.2.tar.gz/AMQPQueue-0.4.2/ez_setup.py | ez_setup.py |
try:
import pkg_resources
pkg_resources.require('amqplib >= 0.5')
except ImportError:
pass
import amqplib.client_0_8 as amqp
import logging
try:
import cPickle as pickle
except ImportError:
import pickle
logging.getLogger('amqplib').setLevel(logging.INFO) # silence amqplib
log = logging.getLogger('amqpqueue')
class Error(Exception):
"Exception raised by AmqpQueue.get()"
pass
class _AmqpQueue:
'''From http://www.lshift.net/blog/2009/06/11/python-queue-interface-for-amqp
This module attempts to create a simple wrapper to Amqp.
The idea is to mimic the subset of python Queue interface.
- the queue is persistent
- no message in queue can be lost
- message is delivered once (not fanout/broadcast)
Just to remind, the AMQP model:
exchange ---> binding ----> queue ---> consumer#1
---> consumer#2
>>> import threading
>>> FORMAT_CONS = '%(asctime)s %(name)-12s %(levelname)8s\t%(message)s'
>>> logging.basicConfig(level=logging.DEBUG, format=FORMAT_CONS)
>>> qp = Producer('test_q')
>>> qp.put('test')
>>> qc = Consumer('test_q')
>>> qc.get()
'test'
>>> qc.get()
Traceback (most recent call last):
...
Error: You must call queue.task_done before you are allowed to get new item.
>>> qc.task_done()
>>> len(qp)
0
>>> qp.consumers()
1
>>> threading.Timer(1.0, lambda: qp.put('a') ).start()
>>> qc.get()
'a'
>>> qc.task_done()
>>> qc.delete(); qc.close()
>>> qp.delete(); qp.close()
>>> qc = Consumer('test_qas')
>>> qc.delete(); qc.close()
'''
# pickle load/dump
dumps = lambda _,s:pickle.dumps(s, -1)
loads = pickle.loads
content_type = "text/x-python"
def __init__(self, queue_name, addr='localhost:5672', \
userid='guest', password='guest', ssl=False, exchange_name='sqs_exchange', binding=None):
self.addr = addr
self.queue_name = queue_name
if binding:
self.binding = binding
else:
self.binding = queue_name
self.exchange_name = exchange_name
self.addr = addr
self.userid = userid
self.password = password
self.ssl = ssl
''' Create amqp connection, channels and bindings '''
self.conn = amqp.Connection(self.addr,
userid = self.userid,
password = self.password,
ssl = self.ssl)
self.ch = self.conn.channel()
def _close_connection(self):
''' Drop tcp/ip connection and amqp abstractions '''
for obj in [self.ch, self.conn, self.conn.transport.sock]:
try:
obj.close()
except Exception:
pass
self.ch, self.conn = None, None
def _declare(self):
''' Define amqp queue, returns (qname, n_msgs, n_consumers) '''
return self.ch.queue_declare(self.queue_name, passive=False, \
durable=True, exclusive=False, auto_delete=False)
def qsize(self):
''' Return number of messages waiting in this queue '''
_, n_msgs, _ = self._declare()
return n_msgs
def consumers(self):
''' How many clients are currently listening to this queue. '''
_, _, n_consumers = self._declare()
return n_consumers
def __len__(self):
''' I think Queue should support len() '''
return self.qsize()
def delete(self):
''' Delete a queue and free data tied to it. '''
try:
self.ch.queue_delete(self.queue_name)
except (TypeError, amqp.AMQPChannelException):
pass
def close(self):
''' Close tcp/ip connection '''
self._close_connection()
class Producer(_AmqpQueue):
'''
Creates/sends/produces messages into the queue.
'''
def __init__(self, *args, **kwargs):
_AmqpQueue.__init__(self, *args, **kwargs)
self.ch.access_request('/data', active=True, read=False, write=True)
self.ch.exchange_declare(self.exchange_name, 'direct', \
durable=True, auto_delete=False)
self._declare()
self.ch.queue_bind(self.queue_name, self.exchange_name, self.queue_name)
def put(self, message):
''' Add message to queue '''
msg = amqp.Message(self.dumps(message), content_type=self.content_type)
self.ch.basic_publish(msg, self.exchange_name, self.queue_name)
class Consumer(_AmqpQueue):
'''
Receives/consumes messages from the queue.
'''
def __init__(self, *args, **kwargs):
_AmqpQueue.__init__(self, *args, **kwargs)
self.ch.access_request('/data', active=True, read=True, write=False)
self._declare()
self.ch.queue_bind(self.queue_name, self.exchange_name, self.queue_name)
self.delivery_tag = None
self.consumer_tag = self.ch.basic_consume(self.queue_name,
callback=self._amqp_callback)
self._amqp_messages = []
def get(self):
"""
        Timeout and non-blocking are not implemented.
"""
if self.delivery_tag is not None:
raise Error('You must call queue.task_done'
' before you are allowed to get new item.')
msg = self._get_blocking()
data = self.loads(msg.body)
self.delivery_tag = msg.delivery_tag
return data
def _amqp_callback(self, msg):
self._amqp_messages.append(msg)
def _get_blocking(self):
while not self._amqp_messages:
self.ch.wait()
return self._amqp_messages.pop(0)
def task_done(self):
''' Indicate that a formerly enqueued task is complete. '''
assert self.delivery_tag is not None
self.ch.basic_ack(self.delivery_tag)
self.delivery_tag = None
def task_failed(self):
''' Indicate that a formerly enqueued task has failed. This will return the
msg to the queue.'''
assert self.delivery_tag is not None
self.ch.basic_reject(self.delivery_tag, requeue=True)
self.delivery_tag = None
class Subscriber(Consumer):
'''
Receives/consumes messages from a subscription queue. If 3 Subscribers connect to
    a given Producer, each will have its own persistent queue, and each queue will
    receive a copy of any message the Producer puts out (fan-out).
'''
def __init__(self, *args, **kwargs):
_AmqpQueue.__init__(self, *args, **kwargs)
self.ch.access_request('/data', active=True, read=True, write=False)
self._declare()
self.ch.queue_bind(self.queue_name, self.exchange_name, self.binding)
self.delivery_tag = None
self.consumer_tag = self.ch.basic_consume(self.queue_name,
callback=self._amqp_callback)
self._amqp_messages = []
if __name__ == '__main__':
import sys
import doctest
try:
import coverage
except ImportError:
print >> sys.stderr, " [*] python-coverage not found"
coverage = None
if coverage:
coverage.erase()
coverage.start()
coverage.exclude('#pragma[: ]+[nN][oO] [cC][oO][vV][eE][rR]')
import amqpqueue
modules = [amqpqueue]
for module in modules:
doctest.testmod(module)
if coverage:
coverage.stop()
coverage.report(modules, ignore_errors=1, show_missing=1)
coverage.erase() | AMQPQueue | /AMQPQueue-0.4.2.tar.gz/AMQPQueue-0.4.2/amqpqueue/amqpqueue.py | amqpqueue.py |
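# A minimal sketch of the fan-out pattern described in Subscriber's docstring
# (queue/exchange names are illustrative; assumes a local broker with the
# default guest credentials, and that the Producer is created first so the
# exchange exists):
#
#   qp = Producer('events', exchange_name='fanout_exchange')
#   sub_a = Subscriber('events_a', exchange_name='fanout_exchange', binding='events')
#   sub_b = Subscriber('events_b', exchange_name='fanout_exchange', binding='events')
#   qp.put('hello')
#   print(sub_a.get()); sub_a.task_done()   # each subscriber gets its own copy
#   print(sub_b.get()); sub_b.task_done()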
from amqpqueue import Producer, Consumer, Subscriber
class QueueFactory(object):
"""Allows you to set defaults for your producer and consumer queues
eg
>>> qf = QueueFactory(addr="remote:5000", exchange_name="worker_exchange")
>>> qp = qf.Producer("my_queue")
>>> qp.put("etc.")
"etc." has been put to 'my_queue' on remote:5000 through the exchange "worker_exchange"
A QueueFactory is a convenient instance to pass to a more complex daemon worker that requires
its own queues and workers.
The set 'defaults' can be overridden:
>>> qp = qf.Consumer("notices", exchange_name="other_sqs_exchange")
"""
def __init__(self, addr='localhost:5672', userid='guest', password='guest', ssl=False, exchange_name='sqs_exchange'):
""" Sets up a context dict, so that when either a Producer or Consumer is required,
the context can be easily overridden by supplied parameters"""
self.context = {}
self.context['addr'] = addr
self.context['userid']= userid
self.context['password']= password
self.context['ssl'] = ssl
self.context['exchange_name'] = exchange_name
def Producer(self, queue, **kw):
this_context = self.context.copy()
for key in kw:
this_context[key] = kw[key]
return Producer(queue, addr=this_context['addr'],
userid=this_context['userid'],
password=this_context['password'],
ssl=this_context['ssl'],
exchange_name=this_context['exchange_name'])
def Consumer(self, queue, **kw):
this_context = self.context.copy()
for key in kw:
this_context[key] = kw[key]
return Consumer(queue, addr=this_context['addr'],
userid=this_context['userid'],
password=this_context['password'],
ssl=this_context['ssl'],
exchange_name=this_context['exchange_name'])
def Subscriber(self, queue, binding, **kw):
this_context = self.context.copy()
for key in kw:
this_context[key] = kw[key]
return Subscriber(queue, addr=this_context['addr'],
userid=this_context['userid'],
password=this_context['password'],
ssl=this_context['ssl'],
exchange_name=this_context['exchange_name'],
binding=binding) | AMQPQueue | /AMQPQueue-0.4.2.tar.gz/AMQPQueue-0.4.2/amqpqueue/queuefactory.py | queuefactory.py |
# Most workers are expected to use JSON msgs
import simplejson
# For the HTTPWorker
from urllib import urlencode
import httplib2
# For tempfile/ramfile handling:
# Unicode is highly likely, therefore StringIO > cStringIO
from StringIO import StringIO
from tempfile import mkstemp
from os import remove, fdopen
STATUSES = {'FAIL':0,
'COMPLETE':1,
}
# shorthand
FAIL = STATUSES['FAIL']
COMPLETE = STATUSES['COMPLETE']
class WorkerResponse(object):
def __init__(self, status, **kw):
self.status = status
self.context = kw
class JsonMsgParseError(Exception):
"""JSON passed as a message over the queue couldn't be decoded."""
pass
class WorkerException(Exception):
def __init__(self, response=None):
self.response = response
    def __str__(self):
        if self.response:
            # response.status already holds the numeric code from STATUSES
            resp_string = "Worker failed with a status: %s" % self.response.status
            if self.response.context:
                resp_string += "\n Context: %s" % self.response.context
            return resp_string
        return "Worker failed"
class Worker(object):
def __init__(self, queue_stdin, queue_stdout=None, **kw):
"""Base class for all the workers.
queue_stdin - the instance passed through queue_stdin should implement a
blocking .get(), .task_done() and __len__().
queue_stdout - the instance passed should implement a blocking .put() and
non-blocking __len__()
Other keyword parameters can be passed to the workers as necessary.
Overwrite the .starttask(msg) command, which is passed the contents of the message.
.endtask(msg, response, **kw) can likewise be overridden to perform addition tasks
after the main work has been completed - BUT methods must acknoledge the msg via a
.task_done() on the queue "stdin".
self.context is a dictionary of all other parameters passed to the worker.
"""
self.queue_stdin = queue_stdin
self.queue_stdout = queue_stdout
self.context = kw
self.stop = False
if 'start' in kw:
self.run()
def parse_json_msg(self, msg, encoding="UTF-8"):
try:
return simplejson.loads(msg,encoding=encoding)
except:
raise JsonMsgParseError
def run(self):
while (True):
# Blocking call:
if self.stop:
break
msg = self.queue_stdin.get()
# TODO implement variable timeout on .starttask() method
resp = self.starttask(msg)
self.endtask(msg, resp)
def starttask(self, msg):
"""Implements a basic 'echo' worker - pointless, but illustrative.
This method should be overridden by a specific worker class."""
return WorkerResponse(COMPLETE)
def endtask(self, msg, response):
"""Simple task end, ack'ing the message consuming it on a COMPLETE response."""
if response.status == FAIL:
            raise WorkerException(response)
elif response.status == COMPLETE:
if self.queue_stdout:
self.queue_stdout.put(msg)
else:
# print msg
pass
self.queue_stdin.task_done()
class JSONWorker(Worker):
"""Similar in practice to the normal Worker, except it only tolerates JSON
messages and will ignore any it cannot parse.
Passing it an outbound queue will allow it to pass any unparsable msgs onwards."""
def run(self):
while (True):
# Blocking call:
if self.stop:
break
msg = self.queue_stdin.get()
# TODO implement variable timeout on .starttask() method
try:
jmsg = simplejson.loads(msg)
resp = self.starttask(jmsg)
self.endtask(jmsg, resp)
except Exception, e:
print "Failed to parse\n%s" % msg
print e
if self.queue_stdout:
self.queue_stdout.put(msg)
# Actively consume bad messages
self.queue_stdin.task_done()
class WorkerFactory(object):
    def get(self, config):
pass
class HTTPWorker(Worker):
"""Gets a local copy of the resource at the URL in the JSON msg ('url') and simply
    prints the first 100 characters.
It is expected that self.endtask will be overwritten.
If the tempfile option is set, remember to delete the temporary file
as well as ack the msg! Eg -
------------------------------------------------------------
import os
class SolrFeeder(HTTPWorker):
def endtask(self, msg, response):
try:
# do stuff with response.context['fd'], the file-descriptor for the resource
finally:
response.context['fd'].close()
if self.context.get('tempfile', False):
os.remove(response.context['tempfile'])
self.queue_stdin.task_done()
s = SolrFeeder(queue_stdin, queue_stdout=None, tempfile = True)
------------------------------------------------------------
If 'id' is passed in the message instead, then this is inserted into a template, set
by instantiating this worker with the parameter 'http_template'. Normal python
string formating applies ( template % id )
Requires configuration parameters:
http_template = template for the URL to GET
"""
    def _get_tempfile(self):
        fd, name = mkstemp()
        # mkstemp returns a raw OS-level descriptor; wrap it in a file object
        # so callers can .write()/.read() it like the StringIO variant
        return fdopen(fd, 'w+b'), name
def _get_ramfile(self):
return (StringIO(), None)
def httpsetup(self):
self.http_template = self.context.get('http_template', None)
self.h = httplib2.Http()
self.method = self.context.get('method', 'GET')
        self.data_method = self.context.get('data_method', 'GETURL')
if self.context.get('tempfile', False):
self.tempfile = self._get_tempfile
else:
self.tempfile = self._get_ramfile
self.setup = True
def starttask(self, msg):
"""This will very simply GET the url supplied and pass the temp/ramfile to endtask"""
try:
if not self.setup:
self.httpsetup()
(fd, name) = self.tempfile()
jmsg = self.parse_json_msg(msg)
# Prepare HTTP request
headers = {}
if 'headers' in jmsg:
headers = jmsg['headers']
url = None
if 'url' in jmsg:
url = jmsg['url']
elif 'id' in jmsg and self.http_template:
url = self.http_template % jmsg['id']
else:
return WorkerResponse(FAIL)
if not url:
raise Exception("url not supplied")
            # httplib2's request returns (response, content); write the body
            http_response, content = self.h.request(url, "GET", headers=headers)
            fd.write(content)
fd.seek(0)
return WorkerResponse(COMPLETE, fd=fd, tempfile=name, jmsg=jmsg, url=url)
except Exception, e:
return WorkerResponse(FAIL, exception = e)
def endtask(self, msg, response):
"""Demo method to be overwritten. This simply reads the first 100 characters from
        the response.context['fd'] (file-handle) and deletes/removes the file."""
try:
first_bit = response.context['fd'].read(100)
if self.queue_stdout:
self.queue_stdout.put(first_bit)
else:
print "From url: %s, first 100 chars: \n %s" % (response.context['url'], first_bit)
finally:
response.context['fd'].close()
if self.context.get('tempfile', False):
remove(response.context['tempfile'])
self.queue_stdin.task_done()
class ShellFileWorker(Worker):
pass
class ShellWorker(Worker):
pass | AMQPQueue | /AMQPQueue-0.4.2.tar.gz/AMQPQueue-0.4.2/amqpqueue/worker.py | worker.py |
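# A minimal wiring sketch for the classes above, using the amqpqueue
# Consumer/Producer interface this module expects (queue names and the
# uppercasing task are illustrative):
#
#   from amqpqueue import Consumer, Producer
#
#   class UpperWorker(Worker):
#       def starttask(self, msg):
#           return WorkerResponse(COMPLETE, result=msg.upper())
#       def endtask(self, msg, response):
#           self.queue_stdout.put(response.context['result'])
#           self.queue_stdin.task_done()
#
#   UpperWorker(Consumer('raw_q'), Producer('upper_q'), start=True)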
AMQPStorm-Pool
==============
`AMQPStorm <https://github.com/eandersson/amqpstorm>`_ connection pooling based on `pika-pool <https://github.com/bninja/pika-pool>`_.
|Version| |Travis| |Coverage|
usage
-----
Get it:
.. code:: bash
pip install amqpstorm-pool
and use it:
.. code:: python
import json
import amqpstorm
import amqpstorm_pool
uri = 'amqp://guest:guest@localhost:5672/%2F?heartbeat=60'
pool = amqpstorm_pool.QueuedPool(
create=lambda: amqpstorm.UriConnection(uri),
max_size=10,
max_overflow=10,
timeout=10,
recycle=3600,
stale=45,
)
with pool.acquire() as cxn:
cxn.channel.queue.declare('fruits')
cxn.channel.basic.publish(
body=json.dumps({
'type': 'banana',
'description': 'they are yellow'
}),
exchange='',
routing_key='fruits',
properties={
'content_type': 'text/plain',
'headers': {'key': 'value'}
}
)
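
After publishing, the same pool can be used to fetch a message back; a
minimal sketch (assuming the ``fruits`` queue declared above):

.. code:: python

    with pool.acquire() as cxn:
        message = cxn.channel.basic.get('fruits')
        if message:
            print(message.body)
            message.ack()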
.. |Version| image:: https://badge.fury.io/py/AMQPStorm-Pool.svg
:target: https://badge.fury.io/py/AMQPStorm-Pool
.. |Travis| image:: https://travis-ci.org/eandersson/amqpstorm-pool.svg
:target: https://travis-ci.org/eandersson/amqpstorm-pool
.. |Coverage| image:: https://codecov.io/gh/eandersson/amqpstorm-pool/branch/master/graph/badge.svg
:target: https://codecov.io/gh/eandersson/amqpstorm-pool
| AMQPStorm-Pool | /AMQPStorm-Pool-1.0.1.tar.gz/AMQPStorm-Pool-1.0.1/README.rst | README.rst |
__version__ = '1.0.1'
__all__ = [
'Error',
'Timeout',
'Overflow',
'Connection',
'Pool',
'NullPool',
'QueuedPool',
]
from datetime import datetime
import logging
try:
# python 3
import queue
except ImportError:
# python 2
import Queue as queue
import threading
import time
import amqpstorm
LOGGER = logging.getLogger(__name__)
class Error(Exception):
pass
class Overflow(Error):
"""
    Raised when a `Pool.acquire` cannot allocate any more connections.
"""
pass
class Timeout(Error):
"""
    Raised when an attempt to `Pool.acquire` a connection has timed out.
"""
pass
class Connection(object):
"""
Connection acquired from a `Pool` instance. Get them like this:
.. code:: python
with pool.acquire() as cxn:
print cxn.channel
"""
#: Exceptions that imply connection has been invalidated.
connectivity_errors = (
amqpstorm.AMQPConnectionError,
amqpstorm.AMQPChannelError,
)
@classmethod
def is_connection_invalidated(cls, exc):
"""
Says whether the given exception indicates the connection has
been invalidated.
:param exc: Exception object.
:return: True if connection has been invalidated, otherwise False.
"""
return any(
isinstance(exc, error) for error in cls.connectivity_errors
)
def __init__(self, pool, fairy):
self.pool = pool
self.fairy = fairy
@property
def channel(self):
if self.fairy.channel is None:
self.fairy.channel = self.fairy.cxn.channel()
return self.fairy.channel
def close(self):
self.pool.close(self.fairy)
self.fairy = None
def release(self):
self.pool.release(self.fairy)
self.fairy = None
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
if type is None or not self.is_connection_invalidated(value):
self.release()
else:
self.close()
class Pool(object):
"""
Pool interface similar to:
http://docs.sqlalchemy.org/en/latest/core/pooling.html#sqlalchemy.pool.Pool
and used like:
.. code:: python
with pool.acquire(timeout=60) as cxn:
cxn.channel.basic.publish(
...
)
"""
#: Acquired connection type.
Connection = Connection
def __init__(self, create):
"""
:param create: Callable creating a new connection.
"""
self.create = create
def acquire(self, timeout=None):
"""
Retrieve a connection from the pool or create a new one.
"""
raise NotImplementedError
def release(self, fairy):
"""
Return a connection to the pool.
"""
raise NotImplementedError
def close(self, fairy):
"""
Forcibly close a connection, suppressing any connection errors.
"""
fairy.close()
class Fairy(object):
"""
Connection wrapper for tracking its associated state.
"""
def __init__(self, cxn):
self.cxn = cxn
self.channel = None
def close(self):
if self.channel:
try:
self.channel.close()
except Connection.connectivity_errors as ex:
if not Connection.is_connection_invalidated(ex):
raise
self.channel = None
try:
self.cxn.close()
except Connection.connectivity_errors as ex:
if not Connection.is_connection_invalidated(ex):
raise
@property
def cxn_str(self):
if self.cxn:
return '{0}:{1}/{2}'.format(
self.cxn.parameters.get('hostname'),
self.cxn.parameters.get('port'),
self.cxn.parameters.get('virtual_host')
)
def __str__(self):
channel = int(self.channel) if self.channel else self.channel
return ', '.join('{0}={1}'.format(k, v) for k, v in [
('cxn', self.cxn_str),
('channel', '{0}'.format(channel)),
])
def _create(self):
"""
All fairy creates go through here.
"""
return self.Fairy(self.create())
class NullPool(Pool):
"""
Dummy pool. It opens/closes connections on each acquire/release.
"""
def acquire(self, timeout=None):
return self.Connection(self, self._create())
def release(self, fairy):
self.close(fairy)
class QueuedPool(Pool):
"""
Queue backed pool.
"""
def __init__(self, create, max_size=10, max_overflow=10, timeout=30,
recycle=None, stale=None):
"""
:param max_size:
Maximum number of connections to keep queued.
:param max_overflow:
Maximum number of connections to create above `max_size`.
:param timeout:
Default number of seconds to wait for a connections to available.
:param recycle:
Lifetime of a connection (since creation) in seconds or None for no
recycling. Expired connections are closed on acquire.
:param stale:
Threshold at which inactive (since release) connections are
considered stale in seconds or None for no staleness. Stale
connections are closed on acquire.
"""
self.max_size = max_size
self.max_overflow = max_overflow
self.timeout = timeout
self.recycle = recycle
self.stale = stale
self._queue = queue.Queue(maxsize=self.max_size)
self._avail_lock = threading.Lock()
self._avail = self.max_size + self.max_overflow
super(QueuedPool, self).__init__(create)
def acquire(self, timeout=None):
try:
fairy = self._queue.get(False)
except queue.Empty:
try:
fairy = self._create()
except Overflow:
timeout = timeout or self.timeout
try:
fairy = self._queue.get(timeout=timeout)
except queue.Empty:
try:
fairy = self._create()
except Overflow:
raise Timeout()
if self.is_expired(fairy):
LOGGER.info('closing expired connection - %s', fairy)
self.close(fairy)
return self.acquire(timeout=timeout)
if self.is_stale(fairy):
LOGGER.info('closing stale connection - %s', fairy)
self.close(fairy)
return self.acquire(timeout=timeout)
try:
if fairy.channel:
fairy.channel.check_for_errors()
elif fairy.cxn:
fairy.cxn.check_for_errors()
except amqpstorm.AMQPError:
LOGGER.info('closing broken connection - %s', fairy)
self.close(fairy)
return self.acquire(timeout=timeout)
return self.Connection(self, fairy)
def release(self, fairy):
fairy.released_at = time.time()
try:
self._queue.put_nowait(fairy)
except queue.Full:
self.close(fairy)
def close(self, fairy):
# inc
with self._avail_lock:
self._avail += 1
return super(QueuedPool, self).close(fairy)
def _create(self):
# dec
with self._avail_lock:
if self._avail <= 0:
raise Overflow()
self._avail -= 1
try:
return super(QueuedPool, self)._create()
except Exception:
# inc
with self._avail_lock:
self._avail += 1
raise
class Fairy(Pool.Fairy):
def __init__(self, cxn):
super(QueuedPool.Fairy, self).__init__(cxn)
self.released_at = self.created_at = time.time()
def __str__(self):
channel = int(self.channel) if self.channel else self.channel
created_at = datetime.fromtimestamp(self.created_at).isoformat()
released_at = datetime.fromtimestamp(self.released_at).isoformat()
return ', '.join('{0}={1}'.format(k, v) for k, v in [
('cxn', self.cxn_str),
('channel', '{0}'.format(channel)),
('created_at', '{0}'.format(created_at)),
('released_at', '{0}'.format(released_at)),
])
def is_stale(self, fairy):
if not self.stale:
return False
return (time.time() - fairy.released_at) > self.stale
def is_expired(self, fairy):
if not self.recycle:
return False
return (time.time() - fairy.created_at) > self.recycle | AMQPStorm-Pool | /AMQPStorm-Pool-1.0.1.tar.gz/AMQPStorm-Pool-1.0.1/amqpstorm_pool/__init__.py | __init__.py |
AMQPStorm
=========
Thread-safe Python RabbitMQ Client & Management library.
|Version|
Introduction
============
AMQPStorm is a library designed to be consistent, stable and thread-safe.
- 100% Test Coverage!
- Supports Python 2.7 and Python 3.3+.
- Fully tested against Python Implementations; CPython and PyPy.
Documentation
=============
Additional documentation is available on `amqpstorm.io <https://www.amqpstorm.io>`_.
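
Usage
=====

A minimal publish sketch, assuming a local broker with the default guest
credentials:

.. code:: python

    import amqpstorm

    with amqpstorm.Connection('localhost', 'guest', 'guest') as connection:
        with connection.channel() as channel:
            channel.queue.declare('simple_queue')
            channel.basic.publish(body='Hello World!',
                                  routing_key='simple_queue')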
Changelog
=========
Version 2.10.6
--------------
- Fixed deprecated warning when using Python 3.11.
Version 2.10.5
--------------
- Added support for bulk removing users using the Management Api.
- Added support to get the Cluster name using the Management Api.
- Fixed ConnectionUri to default to port 5671 when using ssl [#119] - Thanks s-at-ik.
Version 2.10.4
--------------
- Fixed issue with a forcefully closed channel not sending the appropriate response [#114] - Thanks Bernd Höhl.
Version 2.10.3
--------------
- Fixed install bug with cp1250 encoding on Windows [#112] - Thanks ZygusPatryk.
Version 2.10.2
--------------
- Fixed bad socket fd causing high cpu usage [#110] - Thanks aiden0z.
Version 2.10.1
--------------
- Fixed bug with UriConnection not handling amqps:// properly.
- Improved documentation.
Version 2.10.0
--------------
- Added Pagination support to Management list calls (e.g. queues list).
- Added Filtering support to Management list calls.
- Re-use the requests sessions for Management calls.
- Updated to use pytest framework instead of nose for testing.
Version 2.9.0
-------------
- Added support for custom Message implementations - Thanks Jay Hogg.
- Fixed a bug with confirm_delivery not working after closing and re-opening an existing channel.
- Re-worked the channel re-use code.
Version 2.8.5
-------------
- Fixed a potential deadlock when opening a channel with a broken connection [#97] - Thanks mehdigmira.
Version 2.8.4
-------------
- Fixed a bug in Message.create where it would mutate the properties dict [#92] - Thanks Killerama.
Version 2.8.3
-------------
- Fixed pip sdist circular dependency [#88] - Thanks Jay Hogg.
- Fixed basic.consume argument type in documentation [#86] - Thanks TechmarkDavid.
Version 2.8.2
-------------
- Retry on SSLWantReadErrors [#82] - Thanks Bernhard Thiel.
- Added getter/setter methods for Message properties expiration, message_type and user_id [#86] - Thanks Jay Hogg.
Version 2.8.1
-------------
- Cleaned up documentation.
Version 2.8.0
-------------
- Introduced a new channel function called check_for_exceptions.
- Fixed issue where publish was successful but raised an error because the connection was closed [#80] - Thanks Pavol Plaskon.
- Updated SSL handling to use the non-deprecated way of creating a SSL Connection [#79] - Thanks Carl Hörberg from CloudAMQP.
- Enabled SNI for SSL connections by default [#79] - Thanks Carl Hörberg from CloudAMQP.
Version 2.7.2
-------------
- Added ability to override client_properties [#77] - Thanks tkram01.
Version 2.7.1
-------------
- Fixed Connection close taking longer than intended when using SSL [#75]- Thanks troglas.
- Fixed an issue with closing Channels taking too long after the server initiated it.
Version 2.7.0
-------------
- Added support for passing your own ssl context [#71] - Thanks troglas.
- Improved logging verbosity on connection failures [#72] - Thanks troglas.
- Fixed occasional error message when closing a SSL connection [#68] - Thanks troglas.
Version 2.6.2
-------------
- Set default TCP Timeout to 10s on UriConnection to match Connection [#67] - Thanks josemonteiro.
- Internal RPC Timeout for Opening and Closing Connections are now set to a fixed 30s [#67] - Thanks josemonteiro.
Version 2.6.1
-------------
- Fixed minor issue with the last channel id not being available.
Version 2.6.0
-------------
- Re-use closed channel ids [#55] - Thanks mikemrm.
- Changed Poller Timeout to be a constant.
- Improved Connection Close performance.
- Channels is now a publicly available variable in Connections.
Version 2.5.0
-------------
- Upgraded pamqp to v2.0.0.
- Python 3 keys will now always be of type str.
- For more information see https://pamqp.readthedocs.io/en/latest/history.html
- Properly wait until the inbound queue is empty when break_on_empty is set [#63] - Thanks TomGudman.
- Fixed issue with Management queue/exchange declare when the passive flag was set to True.
Credits
=======
Special thanks to gmr (Gavin M. Roy) for creating pamqp, and in addition amqpstorm is heavily influenced by his pika and rabbitpy libraries.
.. |Version| image:: https://badge.fury.io/py/AMQPStorm.svg
:target: https://badge.fury.io/py/AMQPStorm
| AMQPStorm | /AMQPStorm-2.10.6.tar.gz/AMQPStorm-2.10.6/README.rst | README.rst |
Changelog
=========
Version 2.10.6
--------------
- Fixed deprecated warning when using Python 3.11.
Version 2.10.5
--------------
- Added support for bulk removing users with the Management Api.
- Added support to get the Cluster Name using the Management Api.
- Fixed ConnectionUri to default to port 5671 when using ssl [#119] - Thanks s-at-ik.
Version 2.10.4
--------------
- Fixed issue with a forcefully closed channel not sending the appropriate response [#114] - Thanks Bernd Höhl.
Version 2.10.3
--------------
- Fixed install bug with cp1250 encoding on Windows [#112] - Thanks ZygusPatryk.
Version 2.10.2
--------------
- Fixed bad socket fd causing high cpu usage [#110] - Thanks aiden0z.
Version 2.10.1
--------------
- Fixed bug with UriConnection not handling amqps:// properly.
- Improved documentation.
Version 2.10.0
--------------
- Added Pagination support to Management list calls (e.g. queues list).
- Added Filtering support to Management list calls.
- Re-use the requests sessions for Management calls.
- Updated to use pytest framework instead of nose for testing.
Version 2.9.0
-------------
- Added support for custom Message implementations - Thanks Jay Hogg.
- Fixed a bug with confirm_delivery not working after closing and re-opening an existing channel.
- Re-worked the channel re-use code.
Version 2.8.5
-------------
- Fixed a potential deadlock when opening a channel with a broken connection [#97] - Thanks mehdigmira.
Version 2.8.4
-------------
- Fixed a bug in Message.create where it would mutate the properties dict [#92] - Thanks Killerama.
Version 2.8.3
-------------
- Fixed pip sdist circular dependency [#88] - Thanks Jay Hogg.
- Fixed basic.consume argument type in documentation [#86] - Thanks TechmarkDavid.
Version 2.8.2
-------------
- Retry on SSLWantReadErrors [#82] - Thanks Bernhard Thiel.
- Added getter/setter methods for Message properties expiration, message_type and user_id [#86] - Thanks Jay Hogg.
Version 2.8.1
-------------
- Cleaned up documentation.
Version 2.8.0
-------------
- Introduced a new channel function called check_for_exceptions.
- Fixed issue where a publish was successful but raised an error because connection was closed [#80] - Thanks Pavol Plaskon.
- Updated TLS handling to use the non-deprecated way of creating a TLS Connection [#79] - Thanks Carl Hörberg from CloudAMQP.
- Enabled SNI for TLS connections by default [#79] - Thanks Carl Hörberg from CloudAMQP.
Version 2.7.2
-------------
- Added ability to override client_properties [#77] - Thanks tkram01.
Version 2.7.1
-------------
- Fixed Connection close taking longer than intended when using SSL [#75] - Thanks troglas.
- Fixed an issue with closing Channels taking too long after the server initiated it.
Version 2.7.0
-------------
- Added support for passing your own ssl context [#71] - Thanks troglas.
- Improved logging verbosity on connection failures [#72] - Thanks troglas.
- Fixed occasional error message when closing a SSL connection [#68] - Thanks troglas.
Version 2.6.2
-------------
- Set default TCP Timeout to 10s on UriConnection to match Connection [#67] - Thanks josemonteiro.
- Internal RPC Timeout for Opening and Closing Connections are now set to a fixed 30s [#67] - Thanks josemonteiro.
Version 2.6.1
-------------
- Fixed minor issue with the last channel id not being available.
Version 2.6.0
-------------
- Re-use closed channel ids [#55] - Thanks mikemrm.
- Changed Poller Timeout to be a constant.
- Improved Connection Close performance.
- Channels is now a publicly available variable in Connections.
Version 2.5.0
-------------
- Upgraded pamqp to v2.0.0.
- Python 3 keys will now always be of type str.
- For more information see https://pamqp.readthedocs.io/en/latest/history.html
- Properly wait until the inbound queue is empty when break_on_empty is set [#63] - Thanks TomGudman.
- Fixed issue with Management queue/exchange declare when the passive flag was set to True.
Version 2.4.2
-------------
- Added support for External Authentication - Thanks Bernd Höhl.
- Fixed typo in setup.py extra requirements - Thanks Bernd Höhl.
- LICENSE file now included in package - Thanks Tomáš Chvátal.
Version 2.4.1
-------------
- Added client/server negotiation to better determine the maximum supported channels and frame size [#52] - Thanks gastlich.
- We now raise an exception if the maximum allowed channel count is ever reached.
Version 2.4.0
-------------
- basic.consume now allows for multiple callbacks [#48].
Version 2.3.0
-------------
- Added delivery_tag property to message.
- Added redelivered property to message [#41] - Thanks tkram01.
- Added support for Management Api Healthchecks [#39] - Thanks Julien Carpentier.
- Fixed incompatibility with Sun Solaris 10 [#46] - Thanks Giuliox.
- Fixed delivery_tag being set to None by default [#47] - tkram01.
- Exposed requests verify and certs flags to Management Api [#40] - Thanks Julien Carpentier.
Version 2.2.2
-------------
- Fixed potential Heartbeat deadlock when forcefully killing process - Thanks Charles Pierre.
Version 2.2.1
-------------
- Fixed potential Channel leak [#36] - Thanks Adam Mills.
- Fixed threading losing select module during python shutdown [#37] - Thanks Adam Mills.
Version 2.2.0
-------------
- Connection.close should now be more responsive.
- Channels are now reset when re-opening an existing connection.
- Re-wrote large portions of the Test suit.
Version 2.1.4
-------------
- Added parameter to override auto-decode on incoming Messages - Thanks Travis Griggs.
- Fixed a rare bug that could cause the consumer to get stuck if the connection unexpectedly dies - Thanks Connor Wolf.
Version 2.1.3
-------------
- Fixed a potential recursion error in Connection.close.
Version 2.1.1
-------------
- Reduced default TCP Timeout from 30s to 10s.
- Connection Open/Close timeout is now three times the value of TCP Timeout.
- Connection will now wait for a response from the remote server before closing.
Version 2.1.0
-------------
- [Experimental] Added support for the RabbitMQ Management Api.
- Documentation https://amqpstorm.readthedocs.io/en/latest/#management-api-documentation
- Examples https://github.com/eandersson/amqpstorm/tree/master/examples/management_api
- Connection/Channel function check_for_errors now behave more consistently.
Version 2.0.0
-------------
- Messages are now delivered as Message objects by default.
- to_tuple and to_dict are now set to False by default.
This is a breaking change that affects the following functions (see the sketch below):
- channel.process_data_events
- channel.start_consuming
- channel.basic.get
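A minimal sketch of consuming under the new default (the callback now
receives a Message object exposing body and ack()):

    def on_message(message):
        print(message.body)
        message.ack()

    channel.basic.consume(on_message, 'simple_queue')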
Version 1.5.0
-------------
- Added support for Channel.Tx (Server local transactions). [#27]
- Added support for Heartbeat interval 0 (disabled). [#26]
- Added Python implementation to platform string, e.g. Python 2.7.0 (Jython).
- Fixed Jython bug. [#25]
- Fixed incorrect log line for the Connection and Channel Context Manager.
- Removed TCP Keepalive.
Version 1.4.1
-------------
- Heartbeats are now only sent when there is no outgoing traffic - Thanks Tom.
Version 1.4.0
-------------
- 100% Unit-test Coverage!
- All classes are now slotted.
- New improved Heartbeat Monitor.
- If no data has been sent within the Heartbeat interval, the client will now send a Heartbeat to the server - Thanks David Schneider.
- Reduced default RPC timeout from 120s to 60s.
Version 1.3.4
-------------
- Dropped Python 3.2 Support.
- Fixed incorrect SSL warning when adding heartbeat or timeout to uri string [#18] - Thanks Adam Mills.
Version 1.3.3
-------------
- Fixed bug causing messages without a body to not be consumed properly [#16] - Thanks Adam Mills.
Version 1.3.2
-------------
- Fixed minor bug in the Poller error handling.
- Fixed issue where network corruption could cause a connection error to throw the wrong exception.
Version 1.3.1
-------------
- Fixed SSL bug that could trigger an exception when running multiple threads [#14] - Thanks Adam Mills.
- Fixed bug when using channel.basic.get to retrieve large payloads.
- Reduced default RPC timeout from 360s to 120s.
Version 1.3.0
-------------
- Removed noisy logging.
- Fixed Runtime exception caused by listener trying to join itself [#11] - Thanks ramonz.
- Channels are no longer closed after RabbitMQ throws a recoverable exception.
- Added Error mapping based on the AMQP 0.9.1 specifications (when applicable).
Introduced three new variables to the AMQP-Storm Exceptions.
- error_code: This provides HTTP style error codes based on the AMQP Specification.
- error_type: This provides the full AMQP Error name; e.g. NO-ROUTE.
- documentation: This provides the official AMQP Specification documentation string.
These variables are available on all AMQP-Storm exceptions, but if no error code was
provided by RabbitMQ, they will be empty.
Usage:
    try:
        # any channel operation that may fail, e.g. a publish
        self.channel.basic.publish(body, routing_key)
    except AMQPChannelError as why:
        if why.error_code == 312:
            self.channel.queue.declare(queue_name)
[end of file: CHANGELOG.rst]
import threading
import time
from uuid import uuid4
from amqpstorm.base import IDLE_WAIT
from amqpstorm.exception import AMQPChannelError
class Rpc(object):
"""Internal RPC handler.
:param object default_adapter: Connection or Channel.
:param int,float timeout: Rpc timeout.
"""
def __init__(self, default_adapter, timeout=360):
self._lock = threading.Lock()
self._default_connection_adapter = default_adapter
self._timeout = timeout
self._response = {}
self._request = {}
@property
def lock(self):
return self._lock
def on_frame(self, frame_in):
"""On RPC Frame.
:param specification.Frame frame_in: Amqp frame.
:return:
"""
if frame_in.name not in self._request:
return False
uuid = self._request[frame_in.name]
if self._response[uuid]:
self._response[uuid].append(frame_in)
else:
self._response[uuid] = [frame_in]
return True
def register_request(self, valid_responses):
"""Register a RPC request.
:param list valid_responses: List of possible Responses that
we should be waiting for.
:return:
"""
uuid = str(uuid4())
self._response[uuid] = []
for action in valid_responses:
self._request[action] = uuid
return uuid
def remove(self, uuid):
"""Remove any data related to a specific RPC request.
:param str uuid: Rpc Identifier.
:return:
"""
self.remove_request(uuid)
self.remove_response(uuid)
def remove_request(self, uuid):
"""Remove any RPC request(s) using this uuid.
:param str uuid: Rpc Identifier.
:return:
"""
for key in list(self._request):
if self._request[key] == uuid:
del self._request[key]
def remove_response(self, uuid):
"""Remove a RPC Response using this uuid.
:param str uuid: Rpc Identifier.
:return:
"""
if uuid in self._response:
del self._response[uuid]
def get_request(self, uuid, raw=False, multiple=False,
connection_adapter=None):
"""Get a RPC request.
:param str uuid: Rpc Identifier
:param bool raw: If enabled return the frame as is, else return
result as a dictionary.
:param bool multiple: Are we expecting multiple frames.
:param obj connection_adapter: Provide custom connection adapter.
:return:
"""
if uuid not in self._response:
return
self._wait_for_request(
uuid, connection_adapter or self._default_connection_adapter
)
frame = self._get_response_frame(uuid)
if not multiple:
self.remove(uuid)
result = None
if raw:
result = frame
elif frame is not None:
result = dict(frame)
return result
def _get_response_frame(self, uuid):
"""Get a response frame.
:param str uuid: Rpc Identifier
:return:
"""
frame = None
frames = self._response.get(uuid, None)
if frames:
frame = frames.pop(0)
return frame
def _wait_for_request(self, uuid, connection_adapter=None):
"""Wait for RPC request to arrive.
:param str uuid: Rpc Identifier.
:param obj connection_adapter: Provide custom connection adapter.
:return:
"""
start_time = time.time()
while not self._response[uuid]:
connection_adapter.check_for_errors()
if time.time() - start_time > self._timeout:
self._raise_rpc_timeout_error(uuid)
time.sleep(IDLE_WAIT)
def _raise_rpc_timeout_error(self, uuid):
"""Gather information and raise an Rpc exception.
:param str uuid: Rpc Identifier.
:return:
"""
requests = []
for key, value in self._request.items():
if value == uuid:
requests.append(key)
self.remove(uuid)
message = (
'rpc requests %s (%s) took too long' %
(
uuid,
', '.join(requests)
)
)
raise AMQPChannelError(message)

[end of file: amqpstorm/rpc.py]
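A minimal usage sketch of the internal Rpc handler above, with stub frame
and adapter objects standing in for real pamqp frames and a Connection
(illustrative only; in real use, frames are fed in as they arrive from
the socket):

    class StubFrame(object):
        name = 'Channel.OpenOk'

        def __iter__(self):
            yield 'channel_id', 1

    class StubAdapter(object):
        def check_for_errors(self):
            pass

    rpc = Rpc(StubAdapter(), timeout=1)
    uuid = rpc.register_request(['Channel.OpenOk'])  # expected replies
    rpc.on_frame(StubFrame())                        # a reply arrives
    print(rpc.get_request(uuid))                     # {'channel_id': 1}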
import json
import uuid
from datetime import datetime
from amqpstorm.base import BaseMessage
from amqpstorm.compatibility import try_utf8_decode
from amqpstorm.exception import AMQPMessageError
class Message(BaseMessage):
"""RabbitMQ Message.
e.g.
::
# Message Properties.
properties = {
'content_type': 'text/plain',
'expiration': '3600',
'headers': {'key': 'value'},
}
# Create a new message.
message = Message.create(channel, 'Hello RabbitMQ!', properties)
# Publish the message to a queue called, 'my_queue'.
message.publish('my_queue')
:param Channel channel: AMQPStorm Channel
:param bytes,str,unicode body: Message payload
:param dict method: Message method
:param dict properties: Message properties
:param bool auto_decode: Auto-decode strings when possible. Does not
apply to to_dict or to_tuple.
"""
__slots__ = [
'_decode_cache'
]
def __init__(self, channel, body=None, method=None, properties=None,
auto_decode=True):
super(Message, self).__init__(
channel, body, method, properties, auto_decode
)
self._decode_cache = dict()
@staticmethod
def create(channel, body, properties=None):
"""Create a new Message.
:param Channel channel: AMQPStorm Channel
:param bytes,str,unicode body: Message payload
:param dict properties: Message properties
:rtype: Message
"""
properties = dict(properties or {})
if 'correlation_id' not in properties:
properties['correlation_id'] = str(uuid.uuid4())
if 'message_id' not in properties:
properties['message_id'] = str(uuid.uuid4())
if 'timestamp' not in properties:
properties['timestamp'] = datetime.utcnow()
return Message(channel, auto_decode=False,
body=body, properties=properties)
@property
def body(self):
"""Return the Message Body.
If auto_decode is enabled, the body will automatically be
decoded using decode('utf-8') if possible.
:rtype: bytes,str,unicode
"""
if not self._auto_decode:
return self._body
if 'body' in self._decode_cache:
return self._decode_cache['body']
body = try_utf8_decode(self._body)
self._decode_cache['body'] = body
return body
@property
def channel(self):
"""Return the Channel used by this message.
:rtype: Channel
"""
return self._channel
@property
def method(self):
"""Return the Message Method.
If auto_decode is enabled, all strings will automatically be
decoded using decode('utf-8') if possible.
:rtype: dict
"""
return self._try_decode_utf8_content(self._method, 'method')
@property
def properties(self):
"""Returns the Message Properties.
If auto_decode is enabled, all strings will automatically be
decoded using decode('utf-8') if possible.
:rtype: dict
"""
return self._try_decode_utf8_content(self._properties, 'properties')
def ack(self):
"""Acknowledge Message.
:raises AMQPInvalidArgument: Invalid Parameters
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:return:
"""
if not self._method:
raise AMQPMessageError(
'Message.ack only available on incoming messages'
)
self._channel.basic.ack(delivery_tag=self.delivery_tag)
def nack(self, requeue=True):
"""Negative Acknowledgement.
:raises AMQPInvalidArgument: Invalid Parameters
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:param bool requeue: Re-queue the message
"""
if not self._method:
raise AMQPMessageError(
'Message.nack only available on incoming messages'
)
self._channel.basic.nack(delivery_tag=self.delivery_tag,
requeue=requeue)
def reject(self, requeue=True):
"""Reject Message.
:raises AMQPInvalidArgument: Invalid Parameters
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:param bool requeue: Re-queue the message
"""
if not self._method:
raise AMQPMessageError(
'Message.reject only available on incoming messages'
)
self._channel.basic.reject(delivery_tag=self.delivery_tag,
requeue=requeue)
def publish(self, routing_key, exchange='', mandatory=False,
immediate=False):
"""Publish Message.
:param str routing_key: Message routing key
:param str exchange: The exchange to publish the message to
:param bool mandatory: Requires the message is published
:param bool immediate: Request immediate delivery
:raises AMQPInvalidArgument: Invalid Parameters
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:rtype: bool,None
"""
return self._channel.basic.publish(body=self._body,
routing_key=routing_key,
exchange=exchange,
properties=self._properties,
mandatory=mandatory,
immediate=immediate)
@property
def app_id(self):
"""Get AMQP Message attribute: app_id.
:return:
"""
return self.properties.get('app_id')
@app_id.setter
def app_id(self, value):
"""Set AMQP Message attribute: app_id.
:return:
"""
self._update_properties('app_id', value)
@property
def message_id(self):
"""Get AMQP Message attribute: message_id.
:return:
"""
return self.properties.get('message_id')
@message_id.setter
def message_id(self, value):
"""Set AMQP Message attribute: message_id.
:return:
"""
self._update_properties('message_id', value)
@property
def content_encoding(self):
"""Get AMQP Message attribute: content_encoding.
:return:
"""
return self.properties.get('content_encoding')
@content_encoding.setter
def content_encoding(self, value):
"""Set AMQP Message attribute: content_encoding.
:return:
"""
self._update_properties('content_encoding', value)
@property
def content_type(self):
"""Get AMQP Message attribute: content_type.
:return:
"""
return self.properties.get('content_type')
@content_type.setter
def content_type(self, value):
"""Set AMQP Message attribute: content_type.
:return:
"""
self._update_properties('content_type', value)
@property
def correlation_id(self):
"""Get AMQP Message attribute: correlation_id.
:return:
"""
return self.properties.get('correlation_id')
@correlation_id.setter
def correlation_id(self, value):
"""Set AMQP Message attribute: correlation_id.
:return:
"""
self._update_properties('correlation_id', value)
@property
def delivery_mode(self):
"""Get AMQP Message attribute: delivery_mode.
:return:
"""
return self.properties.get('delivery_mode')
@delivery_mode.setter
def delivery_mode(self, value):
"""Set AMQP Message attribute: delivery_mode.
:return:
"""
self._update_properties('delivery_mode', value)
@property
def timestamp(self):
"""Get AMQP Message attribute: timestamp.
:return:
"""
return self.properties.get('timestamp')
@timestamp.setter
def timestamp(self, value):
"""Set AMQP Message attribute: timestamp.
:return:
"""
self._update_properties('timestamp', value)
@property
def priority(self):
"""Get AMQP Message attribute: priority.
:return:
"""
return self.properties.get('priority')
@priority.setter
def priority(self, value):
"""Set AMQP Message attribute: priority.
:return:
"""
self._update_properties('priority', value)
@property
def reply_to(self):
"""Get AMQP Message attribute: reply_to.
:return:
"""
return self.properties.get('reply_to')
@reply_to.setter
def reply_to(self, value):
"""Set AMQP Message attribute: reply_to.
:return:
"""
self._update_properties('reply_to', value)
@property
def message_type(self):
"""Get AMQP Message attribute: message_type.
:return:
"""
return self.properties.get('message_type')
@message_type.setter
def message_type(self, value):
"""Set AMQP Message attribute: message_type.
:return:
"""
self._update_properties('message_type', value)
@property
def expiration(self):
"""Get AMQP Message attribute: expiration.
:return:
"""
return self.properties.get('expiration')
@expiration.setter
def expiration(self, value):
"""Set AMQP Message attribute: expiration.
:return:
"""
self._update_properties('expiration', value)
@property
def user_id(self):
"""Get AMQP Message attribute: user_id.
:return:
"""
return self.properties.get('user_id')
@user_id.setter
def user_id(self, value):
"""Set AMQP Message attribute: user_id.
:return:
"""
self._update_properties('user_id', value)
@property
def redelivered(self):
"""Indicates if this message may have been delivered before (but not
acknowledged).
:rtype: bool,None
"""
if not self._method:
return None
return self._method.get('redelivered')
@property
def delivery_tag(self):
"""Server-assigned delivery tag.
:rtype: int,None
"""
if not self._method:
return None
return self._method.get('delivery_tag')
def json(self):
"""Deserialize the message body, if it is JSON.
:return:
"""
return json.loads(self.body)
def _update_properties(self, name, value):
"""Update properties, and keep cache up-to-date if auto decode is
enabled.
:param str name: Key
:param obj value: Value
:return:
"""
if self._auto_decode and 'properties' in self._decode_cache:
self._decode_cache['properties'][name] = value
self._properties[name] = value
def _try_decode_utf8_content(self, content, content_type):
"""Generic function to decode content.
:param object content:
:return:
"""
if not self._auto_decode or not content:
return content
if content_type in self._decode_cache:
return self._decode_cache[content_type]
if isinstance(content, dict):
content = self._try_decode_dict(content)
else:
content = try_utf8_decode(content)
self._decode_cache[content_type] = content
return content
def _try_decode_dict(self, content):
"""Decode content of a dictionary.
:param dict content:
:return:
"""
result = dict()
for key, value in content.items():
key = try_utf8_decode(key)
if isinstance(value, dict):
result[key] = self._try_decode_dict(value)
elif isinstance(value, list):
result[key] = self._try_decode_list(value)
elif isinstance(value, tuple):
result[key] = self._try_decode_tuple(value)
else:
result[key] = try_utf8_decode(value)
return result
@staticmethod
def _try_decode_list(content):
"""Decode content of a list.
:param list,tuple content:
:return:
"""
result = list()
for value in content:
result.append(try_utf8_decode(value))
return result
@staticmethod
def _try_decode_tuple(content):
"""Decode content of a tuple.
:param tuple content:
:return:
"""
return tuple(Message._try_decode_list(content))

[end of file: amqpstorm/message.py]
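A minimal sketch of the publish and consume sides of Message; it assumes
an open channel and that a queue named 'my_queue' already exists:

    # Publishing: create() fills in correlation_id, message_id and
    # timestamp when they are not supplied.
    message = Message.create(channel, 'Hello RabbitMQ!',
                             properties={'content_type': 'text/plain'})
    message.app_id = 'example-app'  # setters update the properties dict
    message.publish('my_queue')

    # Consuming: incoming messages expose the server metadata.
    received = channel.basic.get('my_queue')
    if received:
        print(received.delivery_tag, received.body)
        received.ack()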
import sys
try:
import ssl
except ImportError:
ssl = None
try:
import simplejson as json # noqa
except ImportError:
import json # noqa
try:
import urlparse # noqa
except ImportError:
import urllib.parse as urlparse # noqa
try:
from urllib import quote # noqa
except ImportError:
from urllib.parse import quote # noqa
PYTHON3 = sys.version_info >= (3, 0, 0)
if PYTHON3:
RANGE = range
else:
RANGE = xrange
class DummyException(Exception):
"""
Never raised by anything.
This is used in except blocks if the intended
exception cannot be imported.
"""
SSL_CERT_MAP = {}
SSL_VERSIONS = {}
SSL_OPTIONS = [
'keyfile',
'certfile',
'cert_reqs',
'ssl_version',
'ca_certs',
'server_hostname',
]
def get_default_ssl_version():
"""Get the highest support TLS version, if none is available, return None.
:rtype: bool,None
"""
if hasattr(ssl, 'PROTOCOL_TLSv1_2'):
return ssl.PROTOCOL_TLSv1_2
elif hasattr(ssl, 'PROTOCOL_TLSv1_1'):
return ssl.PROTOCOL_TLSv1_1
elif hasattr(ssl, 'PROTOCOL_TLSv1'):
return ssl.PROTOCOL_TLSv1
return None
DEFAULT_SSL_VERSION = get_default_ssl_version()
SSL_SUPPORTED = DEFAULT_SSL_VERSION is not None
if SSL_SUPPORTED:
if hasattr(ssl, 'PROTOCOL_TLSv1_2'):
SSL_VERSIONS['protocol_tlsv1_2'] = ssl.PROTOCOL_TLSv1_2
if hasattr(ssl, 'PROTOCOL_TLSv1_1'):
SSL_VERSIONS['protocol_tlsv1_1'] = ssl.PROTOCOL_TLSv1_1
SSL_VERSIONS['protocol_tlsv1'] = ssl.PROTOCOL_TLSv1
SSL_CERT_MAP = {
'cert_none': ssl.CERT_NONE,
'cert_optional': ssl.CERT_OPTIONAL,
'cert_required': ssl.CERT_REQUIRED
}
SSLWantReadError = ssl.SSLWantReadError
else:
SSLWantReadError = DummyException
def is_string(obj):
"""Is this a string.
:param object obj:
:rtype: bool
"""
if PYTHON3:
str_type = (bytes, str)
else:
str_type = (bytes, str, unicode)
return isinstance(obj, str_type)
def is_integer(obj):
"""Is this an integer.
:param object obj:
:return:
"""
if PYTHON3:
return isinstance(obj, int)
return isinstance(obj, (int, long))
def is_unicode(obj):
"""Is this a unicode string.
This always returns False if running Python 3.x.
:param object obj:
:rtype: bool
"""
if PYTHON3:
return False
return isinstance(obj, unicode)
def try_utf8_decode(value):
"""Try to decode an object.
:param value:
:return:
"""
if not value or not is_string(value):
return value
elif PYTHON3 and not isinstance(value, bytes):
return value
elif not PYTHON3 and not isinstance(value, unicode):
return value
try:
return value.decode('utf-8')
except UnicodeDecodeError:
pass
return value
def patch_uri(uri):
"""If a custom uri schema is used with python 2.6 (e.g. amqps),
it will ignore some of the parsing logic.
As a work-around for this we change the amqp/amqps schema
internally to use http/https.
:param str uri: AMQP Connection string
:rtype: str
"""
index = uri.find(':')
if uri[:index] == 'amqps':
uri = uri.replace('amqps', 'https', 1)
elif uri[:index] == 'amqp':
uri = uri.replace('amqp', 'http', 1)
return uri

[end of file: amqpstorm/compatibility.py]
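A small illustration of the decode helper above; only valid UTF-8 byte
strings are decoded, everything else passes through untouched:

    print(try_utf8_decode(b'hello'))     # 'hello'
    print(try_utf8_decode(b'\xc3\x28'))  # invalid UTF-8, returned as-is
    print(try_utf8_decode(42))           # 42, non-strings pass through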
import logging
import threading
import time
from time import sleep
from pamqp import exceptions as pamqp_exception
from pamqp import frame as pamqp_frame
from pamqp import header as pamqp_header
from pamqp import specification
from amqpstorm import compatibility
from amqpstorm.base import IDLE_WAIT
from amqpstorm.base import Stateful
from amqpstorm.channel import Channel
from amqpstorm.channel0 import Channel0
from amqpstorm.exception import AMQPConnectionError
from amqpstorm.exception import AMQPInvalidArgument
from amqpstorm.heartbeat import Heartbeat
from amqpstorm.io import EMPTY_BUFFER
from amqpstorm.io import IO
LOGGER = logging.getLogger(__name__)
DEFAULT_HEARTBEAT_INTERVAL = 60
DEFAULT_SOCKET_TIMEOUT = 10
DEFAULT_VIRTUAL_HOST = '/'
class Connection(Stateful):
"""RabbitMQ Connection.
e.g.
::
import amqpstorm
connection = amqpstorm.Connection('localhost', 'guest', 'guest')
Using a SSL Context:
::
import ssl
import amqpstorm
ssl_options = {
'context': ssl.create_default_context(cafile='ca_certificate.pem'),
'server_hostname': 'rmq.amqpstorm.io',
'check_hostname': True, # New 2.8.0, default is False
'verify_mode': 'required', # New 2.8.0, default is 'none'
}
connection = amqpstorm.Connection(
'rmq.amqpstorm.io', 'guest', 'guest', port=5671,
ssl=True, ssl_options=ssl_options
)
:param str hostname: Hostname
:param str username: Username
:param str password: Password
:param int port: Server port
:param str virtual_host: Virtual host
:param int heartbeat: RabbitMQ Heartbeat interval
:param int,float timeout: Socket timeout
:param bool ssl: Enable SSL
:param dict ssl_options: SSL kwargs
:param dict client_properties: None or dict of client properties
:param bool lazy: Lazy initialize the connection
:raises AMQPConnectionError: Raises if the connection
encountered an error.
"""
__slots__ = [
'heartbeat', 'parameters', '_channel0', '_channels', '_io'
]
def __init__(self, hostname, username, password, port=5672, **kwargs):
super(Connection, self).__init__()
self.lock = threading.RLock()
self.parameters = {
'hostname': hostname,
'username': username,
'password': password,
'port': port,
'virtual_host': kwargs.get('virtual_host', DEFAULT_VIRTUAL_HOST),
'heartbeat': kwargs.get('heartbeat', DEFAULT_HEARTBEAT_INTERVAL),
'timeout': kwargs.get('timeout', DEFAULT_SOCKET_TIMEOUT),
'ssl': kwargs.get('ssl', False),
'ssl_options': kwargs.get('ssl_options', {}),
'client_properties': kwargs.get('client_properties', {})
}
self._validate_parameters()
self._io = IO(self.parameters, exceptions=self._exceptions,
on_read_impl=self._read_buffer)
self._channel0 = Channel0(self, self.parameters['client_properties'])
self._channels = {}
self._last_channel_id = None
self.heartbeat = Heartbeat(self.parameters['heartbeat'],
self._channel0.send_heartbeat)
if not kwargs.get('lazy', False):
self.open()
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, _):
if exception_type:
message = 'Closing connection due to an unhandled exception: %s'
LOGGER.warning(message, exception_value)
self.close()
@property
def channels(self):
"""Returns a dictionary of the Channels currently available.
:rtype: dict
"""
return self._channels
@property
def fileno(self):
"""Returns the Socket File number.
:rtype: int,None
"""
if not self._io.socket:
return None
return self._io.socket.fileno()
@property
def is_blocked(self):
"""Is the connection currently being blocked from publishing by
the remote server.
:rtype: bool
"""
return self._channel0.is_blocked
@property
def max_allowed_channels(self):
"""Returns the maximum allowed channels for the connection.
:rtype: int
"""
return self._channel0.max_allowed_channels
@property
def max_frame_size(self):
"""Returns the maximum allowed frame size for the connection.
:rtype: int
"""
return self._channel0.max_frame_size
@property
def server_properties(self):
"""Returns the RabbitMQ Server Properties.
:rtype: dict
"""
return self._channel0.server_properties
@property
def socket(self):
"""Returns an instance of the Socket used by the Connection.
:rtype: socket.socket
"""
return self._io.socket
def channel(self, rpc_timeout=60, lazy=False):
"""Open a Channel.
:param int rpc_timeout: Timeout before we give up waiting for an RPC
response from the server.
:raises AMQPInvalidArgument: Invalid Parameters
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:rtype: amqpstorm.Channel
"""
LOGGER.debug('Opening a new Channel')
if not compatibility.is_integer(rpc_timeout):
raise AMQPInvalidArgument('rpc_timeout should be an integer')
elif self.is_closed:
raise AMQPConnectionError('connection closed')
with self.lock:
channel_id = self._get_next_available_channel_id()
channel = Channel(channel_id, self, rpc_timeout)
self._channels[channel_id] = channel
if not lazy:
channel.open()
LOGGER.debug('Channel #%d Opened', channel_id)
return self._channels[channel_id]
def check_for_errors(self):
"""Check Connection for errors.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:return:
"""
if not self.exceptions:
if not self.is_closed:
return
why = AMQPConnectionError('connection closed')
self.exceptions.append(why)
self.set_state(self.CLOSED)
self.close()
raise self.exceptions[0]
def close(self):
"""Close the Connection.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:return:
"""
LOGGER.debug('Connection Closing')
if not self.is_closed:
self.set_state(self.CLOSING)
self.heartbeat.stop()
try:
if not self.is_closed and self.socket:
self._channel0.send_close_connection()
self._wait_for_connection_state(state=Stateful.CLOSED)
except AMQPConnectionError:
pass
finally:
self._close_remaining_channels()
self._io.close()
self.set_state(self.CLOSED)
LOGGER.debug('Connection Closed')
def open(self):
"""Open Connection.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
"""
LOGGER.debug('Connection Opening')
self.set_state(self.OPENING)
self._exceptions = []
self._channels = {}
self._last_channel_id = None
self._io.open()
self._send_handshake()
self._wait_for_connection_state(state=Stateful.OPEN)
self.heartbeat.start(self._exceptions)
LOGGER.debug('Connection Opened')
def write_frame(self, channel_id, frame_out):
"""Marshal and write an outgoing pamqp frame to the Socket.
:param int channel_id: Channel ID.
:param specification.Frame frame_out: Amqp frame.
:return:
"""
frame_data = pamqp_frame.marshal(frame_out, channel_id)
self.heartbeat.register_write()
self._io.write_to_socket(frame_data)
def write_frames(self, channel_id, frames_out):
"""Marshal and write multiple outgoing pamqp frames to the Socket.
:param int channel_id: Channel ID.
:param list frames_out: Amqp frames.
:return:
"""
data_out = EMPTY_BUFFER
for single_frame in frames_out:
data_out += pamqp_frame.marshal(single_frame, channel_id)
self.heartbeat.register_write()
self._io.write_to_socket(data_out)
def _close_remaining_channels(self):
"""Forcefully close all open channels.
:return:
"""
for channel_id in list(self._channels):
self._channels[channel_id].set_state(Channel.CLOSED)
self._channels[channel_id].close()
self._cleanup_channel(channel_id)
def _get_next_available_channel_id(self):
"""Returns the next available available channel id.
:raises AMQPConnectionError: Raises if there is no available channel.
:rtype: int
"""
for channel_id in compatibility.RANGE(self._last_channel_id or 1,
self.max_allowed_channels + 1):
if channel_id in self._channels:
channel = self._channels[channel_id]
if channel.current_state != Channel.CLOSED:
continue
del self._channels[channel_id]
self._last_channel_id = channel_id
return channel_id
if self._last_channel_id:
self._last_channel_id = None
return self._get_next_available_channel_id()
raise AMQPConnectionError(
'reached the maximum number of channels %d' %
self.max_allowed_channels)
def _handle_amqp_frame(self, data_in):
"""Unmarshal a single AMQP frame and return the result.
:param data_in: socket data
:return: data_in, channel_id, frame
"""
if not data_in:
return data_in, None, None
try:
byte_count, channel_id, frame_in = pamqp_frame.unmarshal(data_in)
return data_in[byte_count:], channel_id, frame_in
except pamqp_exception.UnmarshalingException:
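# Not enough data buffered for a complete frame yet; return the
# data untouched and wait for more to arrive from the socket.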
pass
except specification.AMQPFrameError as why:
LOGGER.error('AMQPFrameError: %r', why, exc_info=True)
except ValueError as why:
LOGGER.error(why, exc_info=True)
self.exceptions.append(AMQPConnectionError(why))
return data_in, None, None
def _read_buffer(self, data_in):
"""Process the socket buffer, and direct the data to the appropriate
channel.
:rtype: bytes
"""
while data_in:
data_in, channel_id, frame_in = self._handle_amqp_frame(data_in)
if frame_in is None:
break
self.heartbeat.register_read()
if channel_id == 0:
self._channel0.on_frame(frame_in)
elif channel_id in self._channels:
self._channels[channel_id].on_frame(frame_in)
return data_in
def _cleanup_channel(self, channel_id):
"""Remove the the channel from the list of available channels.
:param int channel_id: Channel id
:return:
"""
with self.lock:
if channel_id not in self._channels:
return
del self._channels[channel_id]
def _send_handshake(self):
"""Send a RabbitMQ Handshake.
:return:
"""
self._io.write_to_socket(pamqp_header.ProtocolHeader().marshal())
def _validate_parameters(self):
"""Validate Connection Parameters.
:return:
"""
if not compatibility.is_string(self.parameters['hostname']):
raise AMQPInvalidArgument('hostname should be a string')
elif not compatibility.is_integer(self.parameters['port']):
raise AMQPInvalidArgument('port should be an integer')
elif not compatibility.is_string(self.parameters['username']):
raise AMQPInvalidArgument('username should be a string')
elif not compatibility.is_string(self.parameters['password']):
raise AMQPInvalidArgument('password should be a string')
elif not compatibility.is_string(self.parameters['virtual_host']):
raise AMQPInvalidArgument('virtual_host should be a string')
elif not isinstance(self.parameters['timeout'], (int, float)):
raise AMQPInvalidArgument('timeout should be an integer or float')
elif not compatibility.is_integer(self.parameters['heartbeat']):
raise AMQPInvalidArgument('heartbeat should be an integer')
def _wait_for_connection_state(self, state=Stateful.OPEN, rpc_timeout=30):
"""Wait for a Connection state.
:param int state: State that we expect
:raises AMQPConnectionError: Raises if we are unable to establish
a connection to RabbitMQ.
:return:
"""
start_time = time.time()
while self.current_state != state:
self.check_for_errors()
if time.time() - start_time > rpc_timeout:
raise AMQPConnectionError('connection timed out')
sleep(IDLE_WAIT)

[end of file: amqpstorm/connection.py]
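A minimal sketch of typical Connection usage (host, credentials and
queue name are illustrative):

    with Connection('localhost', 'guest', 'guest') as connection:
        with connection.channel() as channel:
            channel.queue.declare('hello_world')
            channel.basic.publish(body='Hello World!',
                                  routing_key='hello_world')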
import locale
from amqpstorm.compatibility import is_string
from amqpstorm.exception import AMQPChannelError
AUTH_MECHANISM = 'PLAIN'
IDLE_WAIT = 0.01
LOCALE = locale.getlocale()[0] or 'en_US'
MAX_FRAME_SIZE = 131072
MAX_CHANNELS = 65535
class Stateful(object):
"""Stateful implementation."""
CLOSED = 0
CLOSING = 1
OPENING = 2
OPEN = 3
def __init__(self):
self._state = self.CLOSED
self._exceptions = []
def set_state(self, state):
"""Set State.
:param int state:
:return:
"""
self._state = state
@property
def current_state(self):
"""Get the State.
:rtype: int
"""
return self._state
@property
def is_closed(self):
"""Is Closed?
:rtype: bool
"""
return self._state == self.CLOSED
@property
def is_closing(self):
"""Is Closing?
:rtype: bool
"""
return self._state == self.CLOSING
@property
def is_opening(self):
"""Is Opening?
:rtype: bool
"""
return self._state == self.OPENING
@property
def is_open(self):
"""Is Open?
:rtype: bool
"""
return self._state == self.OPEN
@property
def exceptions(self):
"""Stores all exceptions thrown by this instance.
This is useful for troubleshooting, and is used internally
to check the health of the connection.
:rtype: list
"""
return self._exceptions
class BaseChannel(Stateful):
"""Channel base class."""
__slots__ = [
'_channel_id', '_consumer_tags'
]
def __init__(self, channel_id):
super(BaseChannel, self).__init__()
self._consumer_tags = []
self._channel_id = channel_id
@property
def channel_id(self):
"""Get Channel id.
:rtype: int
"""
return self._channel_id
@property
def consumer_tags(self):
"""Get a list of consumer tags.
:rtype: list
"""
return self._consumer_tags
def add_consumer_tag(self, tag):
"""Add a Consumer tag.
:param str tag: Consumer tag.
:return:
"""
if not is_string(tag):
raise AMQPChannelError('consumer tag needs to be a string')
if tag not in self._consumer_tags:
self._consumer_tags.append(tag)
def remove_consumer_tag(self, tag=None):
"""Remove a Consumer tag.
If no tag is specified, all tags will be removed.
:param str,None tag: Consumer tag.
:return:
"""
if tag is not None:
if tag in self._consumer_tags:
self._consumer_tags.remove(tag)
else:
self._consumer_tags = []
class BaseMessage(object):
"""Message base class.
:param Channel channel: AMQPStorm Channel
:param str,unicode body: Message body
:param dict method: Message method
:param dict properties: Message properties
:param bool auto_decode: This is not implemented in the base message class.
"""
__slots__ = [
'_auto_decode', '_body', '_channel', '_method', '_properties'
]
def __init__(self, channel, body=None, method=None, properties=None,
auto_decode=None):
self._auto_decode = auto_decode
self._channel = channel
self._body = body
self._method = method
self._properties = properties or {}
def __iter__(self):
for attribute in ['_body', '_channel', '_method', '_properties']:
yield attribute[1::], getattr(self, attribute)
def to_dict(self):
"""Message to Dictionary.
:rtype: dict
"""
return {
'body': self._body,
'method': self._method,
'properties': self._properties,
'channel': self._channel
}
def to_tuple(self):
"""Message to Tuple.
:rtype: tuple
"""
return self._body, self._channel, self._method, self._properties
class Handler(object):
"""Operations Handler (e.g. Queue, Exchange)"""
__slots__ = [
'_channel'
]
def __init__(self, channel):
self._channel = channel

[end of file: amqpstorm/base.py]
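A quick illustration of the Stateful flags shared by Connection and
Channel:

    state = Stateful()
    print(state.is_closed)          # True, the initial state is CLOSED
    state.set_state(Stateful.OPEN)
    print(state.is_open)            # True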
import logging
import platform
from pamqp import specification
from pamqp.heartbeat import Heartbeat
from amqpstorm import __version__
from amqpstorm.base import LOCALE
from amqpstorm.base import MAX_CHANNELS
from amqpstorm.base import MAX_FRAME_SIZE
from amqpstorm.base import Stateful
from amqpstorm.compatibility import try_utf8_decode
from amqpstorm.exception import AMQPConnectionError
LOGGER = logging.getLogger(__name__)
class Channel0(object):
"""Internal Channel0 handler."""
def __init__(self, connection, client_properties=None):
super(Channel0, self).__init__()
self.is_blocked = False
self.max_allowed_channels = MAX_CHANNELS
self.max_frame_size = MAX_FRAME_SIZE
self.server_properties = {}
self._connection = connection
self._heartbeat = connection.parameters['heartbeat']
self._parameters = connection.parameters
self._override_client_properties = client_properties
def on_frame(self, frame_in):
"""Handle frames sent to Channel0.
:param frame_in: Amqp frame.
:return:
"""
LOGGER.debug('Frame Received: %s', frame_in.name)
if frame_in.name == 'Heartbeat':
return
elif frame_in.name == 'Connection.Close':
self._close_connection(frame_in)
elif frame_in.name == 'Connection.CloseOk':
self._close_connection_ok()
elif frame_in.name == 'Connection.Blocked':
self._blocked_connection(frame_in)
elif frame_in.name == 'Connection.Unblocked':
self._unblocked_connection()
elif frame_in.name == 'Connection.OpenOk':
self._set_connection_state(Stateful.OPEN)
elif frame_in.name == 'Connection.Start':
self.server_properties = frame_in.server_properties
self._send_start_ok(frame_in)
elif frame_in.name == 'Connection.Tune':
self._send_tune_ok(frame_in)
self._send_open_connection()
else:
LOGGER.error('[Channel0] Unhandled Frame: %s', frame_in.name)
def send_close_connection(self):
"""Send Connection Close frame.
:return:
"""
self._write_frame(specification.Connection.Close())
def send_heartbeat(self):
"""Send Heartbeat frame.
:return:
"""
if not self._connection.is_open:
return
self._write_frame(Heartbeat())
def _close_connection(self, frame_in):
"""Connection Close.
:param specification.Connection.Close frame_in: Amqp frame.
:return:
"""
self._set_connection_state(Stateful.CLOSED)
if frame_in.reply_code != 200:
reply_text = try_utf8_decode(frame_in.reply_text)
message = (
'Connection was closed by remote server: %s' % reply_text
)
exception = AMQPConnectionError(message,
reply_code=frame_in.reply_code)
self._connection.exceptions.append(exception)
def _close_connection_ok(self):
"""Connection CloseOk frame received.
:return:
"""
self._set_connection_state(Stateful.CLOSED)
def _blocked_connection(self, frame_in):
"""Connection is Blocked.
:param frame_in:
:return:
"""
self.is_blocked = True
LOGGER.warning(
'Connection is blocked by remote server: %s',
try_utf8_decode(frame_in.reason)
)
def _negotiate(self, server_value, client_value):
"""Negotiate the highest supported value. Fall back on the
client side value if zero.
:param int server_value: Server Side value
:param int client_value: Client Side value
:rtype: int
:return:
"""
return min(server_value, client_value) or client_value
def _unblocked_connection(self):
"""Connection is Unblocked.
:return:
"""
self.is_blocked = False
LOGGER.info('Connection is no longer blocked by remote server')
def _plain_credentials(self):
"""AMQP Plain Credentials.
:rtype: str
"""
return '\0%s\0%s' % (self._parameters['username'],
self._parameters['password'])
def _send_start_ok(self, frame_in):
"""Send Start OK frame.
:param specification.Connection.Start frame_in: Amqp frame.
:return:
"""
mechanisms = try_utf8_decode(frame_in.mechanisms)
if 'EXTERNAL' in mechanisms:
mechanism = 'EXTERNAL'
credentials = '\0\0'
elif 'PLAIN' in mechanisms:
mechanism = 'PLAIN'
credentials = self._plain_credentials()
else:
exception = AMQPConnectionError(
'Unsupported Security Mechanism(s): %s' %
frame_in.mechanisms
)
self._connection.exceptions.append(exception)
return
start_ok_frame = specification.Connection.StartOk(
mechanism=mechanism,
client_properties=self._client_properties(),
response=credentials,
locale=LOCALE
)
self._write_frame(start_ok_frame)
def _send_tune_ok(self, frame_in):
"""Send Tune OK frame.
:param specification.Connection.Tune frame_in: Tune frame.
:return:
"""
self.max_allowed_channels = self._negotiate(frame_in.channel_max,
MAX_CHANNELS)
self.max_frame_size = self._negotiate(frame_in.frame_max,
MAX_FRAME_SIZE)
LOGGER.debug(
'Negotiated max frame size %d, max channels %d',
self.max_frame_size, self.max_allowed_channels
)
tune_ok_frame = specification.Connection.TuneOk(
channel_max=self.max_allowed_channels,
frame_max=self.max_frame_size,
heartbeat=self._heartbeat)
self._write_frame(tune_ok_frame)
def _send_open_connection(self):
"""Send Open Connection frame.
:return:
"""
open_frame = specification.Connection.Open(
virtual_host=self._parameters['virtual_host']
)
self._write_frame(open_frame)
def _set_connection_state(self, state):
"""Set Connection state.
:param state:
:return:
"""
self._connection.set_state(state)
def _write_frame(self, frame_out):
"""Write a pamqp frame from Channel0.
:param frame_out: Amqp frame.
:return:
"""
self._connection.write_frame(0, frame_out)
LOGGER.debug('Frame Sent: %s', frame_out.name)
def _client_properties(self):
"""AMQPStorm Client Properties.
:rtype: dict
"""
client_properties = {
'product': 'AMQPStorm',
'platform': 'Python %s (%s)' % (platform.python_version(),
platform.python_implementation()),
'capabilities': {
'basic.nack': True,
'connection.blocked': True,
'publisher_confirms': True,
'consumer_cancel_notify': True,
'authentication_failure_close': True,
},
'information': 'See https://github.com/eandersson/amqpstorm',
'version': __version__
}
if self._override_client_properties:
client_properties.update(self._override_client_properties)
return client_properties

[end of file: amqpstorm/channel0.py]
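The tune negotiation in _negotiate picks the lowest non-zero value,
treating zero from the server as "no limit" so the client-side default
wins; a small illustration of the same min()-or-fallback logic:

    print(min(0, 65535) or 65535)     # 65535, server imposed no limit
    print(min(2047, 65535) or 65535)  # 2047, server capped the value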
import logging
import threading
import time
from pamqp import specification
from pamqp.header import ContentHeader
from amqpstorm import compatibility
from amqpstorm.base import BaseChannel
from amqpstorm.base import BaseMessage
from amqpstorm.base import IDLE_WAIT
from amqpstorm.basic import Basic
from amqpstorm.compatibility import try_utf8_decode
from amqpstorm.exception import AMQPError
from amqpstorm.exception import AMQPChannelError
from amqpstorm.exception import AMQPConnectionError
from amqpstorm.exception import AMQPInvalidArgument
from amqpstorm.exception import AMQPMessageError
from amqpstorm.exchange import Exchange
from amqpstorm.message import Message
from amqpstorm.queue import Queue
from amqpstorm.rpc import Rpc
from amqpstorm.tx import Tx
LOGGER = logging.getLogger(__name__)
CONTENT_FRAME = ['Basic.Deliver', 'ContentHeader', 'ContentBody']
class Channel(BaseChannel):
"""RabbitMQ Channel.
e.g.
::
channel = connection.channel()
"""
__slots__ = [
'_consumer_callbacks', 'rpc', '_basic', '_confirming_deliveries',
'_connection', '_exchange', '_inbound', '_queue', '_tx'
]
def __init__(self, channel_id, connection, rpc_timeout):
super(Channel, self).__init__(channel_id)
self.lock = threading.Lock()
self.rpc = Rpc(self, timeout=rpc_timeout)
self._consumer_callbacks = {}
self._confirming_deliveries = False
self._connection = connection
self._inbound = []
self._basic = Basic(self, connection.max_frame_size)
self._exchange = Exchange(self)
self._tx = Tx(self)
self._queue = Queue(self)
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, _):
if exception_type:
LOGGER.warning(
'Closing channel due to an unhandled exception: %s',
exception_value
)
if not self.is_open:
return
self.close()
def __int__(self):
return self._channel_id
@property
def basic(self):
"""RabbitMQ Basic Operations.
e.g.
::
message = channel.basic.get(queue='hello_world')
:rtype: amqpstorm.basic.Basic
"""
return self._basic
@property
def exchange(self):
"""RabbitMQ Exchange Operations.
e.g.
::
channel.exchange.declare(exchange='hello_world')
:rtype: amqpstorm.exchange.Exchange
"""
return self._exchange
@property
def queue(self):
"""RabbitMQ Queue Operations.
e.g.
::
channel.queue.declare(queue='hello_world')
:rtype: amqpstorm.queue.Queue
"""
return self._queue
@property
def tx(self):
"""RabbitMQ Tx Operations.
e.g.
::
channel.tx.commit()
:rtype: amqpstorm.tx.Tx
"""
return self._tx
def build_inbound_messages(self, break_on_empty=False, to_tuple=False,
auto_decode=True, message_impl=None):
"""Build messages in the inbound queue.
:param bool break_on_empty: Should we break the loop when there are
no more messages in our inbound queue.
This does not guarantee that the queue
is emptied before the loop is broken, as
messages may be consumed faster than
they are being delivered by RabbitMQ,
causing the loop to be broken prematurely.
:param bool to_tuple: Should incoming messages be converted to a
tuple before delivery.
:param bool auto_decode: Auto-decode strings when possible.
:param class message_impl: Optional message class to use, derived from
BaseMessage, for created messages. Defaults
to Message.
:raises AMQPInvalidArgument: Invalid Parameters
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:rtype: :py:class:`generator`
"""
self.check_for_errors()
if message_impl:
if not issubclass(message_impl, BaseMessage):
raise AMQPInvalidArgument(
'message_impl must derive from BaseMessage'
)
else:
message_impl = Message
while not self.is_closed:
message = self._build_message(auto_decode=auto_decode,
message_impl=message_impl)
if not message:
self.check_for_errors()
time.sleep(IDLE_WAIT)
if break_on_empty and not self._inbound:
break
continue
if to_tuple:
yield message.to_tuple()
continue
yield message
def close(self, reply_code=200, reply_text=''):
"""Close Channel.
:param int reply_code: Close reply code (e.g. 200)
:param str reply_text: Close reply text
:raises AMQPInvalidArgument: Invalid Parameters
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:return:
"""
if not compatibility.is_integer(reply_code):
raise AMQPInvalidArgument('reply_code should be an integer')
elif not compatibility.is_string(reply_text):
raise AMQPInvalidArgument('reply_text should be a string')
try:
if self._connection.is_closed or not self.is_open:
self.stop_consuming()
LOGGER.debug('Channel #%d forcefully Closed', self.channel_id)
return
self.set_state(self.CLOSING)
LOGGER.debug('Channel #%d Closing', self.channel_id)
try:
self.stop_consuming()
except AMQPChannelError:
self.remove_consumer_tag()
self.rpc_request(specification.Channel.Close(
reply_code=reply_code,
reply_text=reply_text),
connection_adapter=self._connection
)
finally:
if self._inbound:
del self._inbound[:]
self.set_state(self.CLOSED)
LOGGER.debug('Channel #%d Closed', self.channel_id)
def check_for_errors(self):
"""Check connection and channel for errors.
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:return:
"""
try:
self._connection.check_for_errors()
except AMQPConnectionError:
self.set_state(self.CLOSED)
raise
self.check_for_exceptions()
if self.is_closed:
raise AMQPChannelError('channel closed')
def check_for_exceptions(self):
"""Check channel for exceptions.
:raises AMQPChannelError: Raises if the channel encountered an error.
:return:
"""
if self.exceptions:
exception = self.exceptions[0]
if self.is_open:
self.exceptions.pop(0)
raise exception
def confirm_deliveries(self):
"""Set the channel to confirm that each message has been
successfully delivered.
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:return:
"""
self._confirming_deliveries = True
confirm_frame = specification.Confirm.Select()
return self.rpc_request(confirm_frame)
@property
def confirming_deliveries(self):
"""Is the channel set to confirm deliveries.
:return:
"""
return self._confirming_deliveries
def on_frame(self, frame_in):
"""Handle frame sent to this specific channel.
:param pamqp.Frame frame_in: Amqp frame.
:return:
"""
if self.rpc.on_frame(frame_in):
return
if frame_in.name in CONTENT_FRAME:
self._inbound.append(frame_in)
elif frame_in.name == 'Basic.Cancel':
self._basic_cancel(frame_in)
elif frame_in.name == 'Basic.CancelOk':
self.remove_consumer_tag(frame_in.consumer_tag)
elif frame_in.name == 'Basic.ConsumeOk':
self.add_consumer_tag(frame_in['consumer_tag'])
elif frame_in.name == 'Basic.Return':
self._basic_return(frame_in)
elif frame_in.name == 'Channel.Close':
self._close_channel(frame_in)
elif frame_in.name == 'Channel.Flow':
self.write_frame(specification.Channel.FlowOk(frame_in.active))
else:
LOGGER.error(
'[Channel%d] Unhandled Frame: %s -- %s',
self.channel_id, frame_in.name, dict(frame_in)
)
def open(self):
"""Open Channel.
:return:
"""
self._inbound = []
self._exceptions = []
self._confirming_deliveries = False
self.set_state(self.OPENING)
self.rpc_request(specification.Channel.Open())
self.set_state(self.OPEN)
def process_data_events(self, to_tuple=False, auto_decode=True):
"""Consume inbound messages.
:param bool to_tuple: Should incoming messages be converted to a
tuple before delivery.
:param bool auto_decode: Auto-decode strings when possible.
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:return:
"""
if not self._consumer_callbacks:
raise AMQPChannelError('no consumer callback defined')
for message in self.build_inbound_messages(break_on_empty=True,
auto_decode=auto_decode):
consumer_tag = message._method.get('consumer_tag')
if to_tuple:
# noinspection PyCallingNonCallable
self._consumer_callbacks[consumer_tag](*message.to_tuple())
continue
# noinspection PyCallingNonCallable
self._consumer_callbacks[consumer_tag](message)
def rpc_request(self, frame_out, connection_adapter=None):
"""Perform a RPC Request.
:param specification.Frame frame_out: Amqp frame.
:rtype: dict
"""
with self.rpc.lock:
uuid = self.rpc.register_request(frame_out.valid_responses)
self._connection.write_frame(self.channel_id, frame_out)
return self.rpc.get_request(
uuid, connection_adapter=connection_adapter
)
def start_consuming(self, to_tuple=False, auto_decode=True):
"""Start consuming messages.
:param bool to_tuple: Should incoming messages be converted to a
tuple before delivery.
:param bool auto_decode: Auto-decode strings when possible.
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:return:
"""
while not self.is_closed:
self.process_data_events(
to_tuple=to_tuple,
auto_decode=auto_decode
)
if self.consumer_tags:
time.sleep(IDLE_WAIT)
continue
break
def stop_consuming(self):
"""Stop consuming messages.
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:return:
"""
if not self.consumer_tags:
return
if not self.is_closed:
for tag in self.consumer_tags:
self.basic.cancel(tag)
self.remove_consumer_tag()
def write_frame(self, frame_out):
"""Write a pamqp frame from the current channel.
:param specification.Frame frame_out: A single pamqp frame.
:return:
"""
self.check_for_errors()
self._connection.write_frame(self.channel_id, frame_out)
def write_frames(self, frames_out):
"""Write multiple pamqp frames from the current channel.
:param list frames_out: A list of pamqp frames.
:return:
"""
self.check_for_errors()
self._connection.write_frames(self.channel_id, frames_out)
def _basic_cancel(self, frame_in):
"""Handle a Basic Cancel frame.
:param specification.Basic.Cancel frame_in: Amqp frame.
:return:
"""
LOGGER.warning(
'Received Basic.Cancel on consumer_tag: %s',
try_utf8_decode(frame_in.consumer_tag)
)
self.remove_consumer_tag(frame_in.consumer_tag)
def _basic_return(self, frame_in):
"""Handle a Basic Return Frame and treat it as an error.
:param specification.Basic.Return frame_in: Amqp frame.
:return:
"""
reply_text = try_utf8_decode(frame_in.reply_text)
message = (
"Message not delivered: %s (%s) to queue '%s' from exchange '%s'" %
(
reply_text,
frame_in.reply_code,
frame_in.routing_key,
frame_in.exchange
)
)
exception = AMQPMessageError(message,
reply_code=frame_in.reply_code)
self.exceptions.append(exception)
def _build_message(self, auto_decode, message_impl):
"""Fetch and build a complete Message from the inbound queue.
:param bool auto_decode: Auto-decode strings when possible.
:param class message_impl: Message implementation from BaseMessage
:rtype: Message
"""
with self.lock:
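# A complete message requires at least a Basic.Deliver and a
# ContentHeader frame before any body frames can be assembled.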
if len(self._inbound) < 2:
return None
headers = self._build_message_headers()
if not headers:
return None
basic_deliver, content_header = headers
body = self._build_message_body(content_header.body_size)
message = message_impl(channel=self,
body=body,
method=dict(basic_deliver),
properties=dict(content_header.properties),
auto_decode=auto_decode)
return message
def _build_message_headers(self):
"""Fetch Message Headers (Deliver & Header Frames).
:rtype: tuple,None
"""
basic_deliver = self._inbound.pop(0)
if not isinstance(basic_deliver, specification.Basic.Deliver):
LOGGER.warning(
'Received an out-of-order frame: %s was '
'expecting a Basic.Deliver frame',
type(basic_deliver)
)
return None
content_header = self._inbound.pop(0)
if not isinstance(content_header, ContentHeader):
LOGGER.warning(
'Received an out-of-order frame: %s was '
'expecting a ContentHeader frame',
type(content_header)
)
return None
return basic_deliver, content_header
def _build_message_body(self, body_size):
"""Build the Message body from the inbound queue.
:rtype: str
"""
body = bytes()
while len(body) < body_size:
if not self._inbound:
self.check_for_errors()
time.sleep(IDLE_WAIT)
continue
body_piece = self._inbound.pop(0)
if not body_piece.value:
break
body += body_piece.value
return body
def _close_channel(self, frame_in):
"""Close Channel.
:param specification.Channel.Close frame_in: Channel Close frame.
:return:
"""
self.set_state(self.CLOSING)
if not self._connection.is_closed:
try:
self.write_frame(specification.Channel.CloseOk())
except AMQPError:
pass
self.remove_consumer_tag()
if self._inbound:
del self._inbound[:]
self.exceptions.append(AMQPChannelError(
'Channel %d was closed by remote server: %s' %
(
self._channel_id,
try_utf8_decode(frame_in.reply_text)
),
reply_code=frame_in.reply_code
))
self.set_state(self.CLOSED)

[end of file: amqpstorm/channel.py]
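A minimal consumer sketch using the Channel API above (queue name and
callback are illustrative, and an open connection is assumed):

    def on_message(message):
        print(message.body)
        message.ack()

    channel = connection.channel()
    channel.basic.consume(on_message, 'my_queue', no_ack=False)
    try:
        channel.start_consuming()
    except KeyboardInterrupt:
        channel.close()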
import logging
import math
from pamqp import body as pamqp_body
from pamqp import header as pamqp_header
from pamqp import specification
from amqpstorm import compatibility
from amqpstorm.base import BaseMessage
from amqpstorm.base import Handler
from amqpstorm.base import MAX_FRAME_SIZE
from amqpstorm.exception import AMQPChannelError
from amqpstorm.exception import AMQPInvalidArgument
from amqpstorm.message import Message
LOGGER = logging.getLogger(__name__)
class Basic(Handler):
"""RabbitMQ Basic Operations."""
__slots__ = ['_max_frame_size']
def __init__(self, channel, max_frame_size=None):
super(Basic, self).__init__(channel)
self._max_frame_size = max_frame_size or MAX_FRAME_SIZE
def qos(self, prefetch_count=0, prefetch_size=0, global_=False):
"""Specify quality of service.
:param int prefetch_count: Prefetch window in messages
:param int/long prefetch_size: Prefetch window in octets
:param bool global_: Apply to entire connection
:raises AMQPInvalidArgument: Invalid Parameters
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:rtype: dict
"""
if not compatibility.is_integer(prefetch_count):
raise AMQPInvalidArgument('prefetch_count should be an integer')
elif not compatibility.is_integer(prefetch_size):
raise AMQPInvalidArgument('prefetch_size should be an integer')
elif not isinstance(global_, bool):
raise AMQPInvalidArgument('global_ should be a boolean')
qos_frame = specification.Basic.Qos(prefetch_count=prefetch_count,
prefetch_size=prefetch_size,
global_=global_)
return self._channel.rpc_request(qos_frame)
def get(self, queue='', no_ack=False, to_dict=False, auto_decode=True,
message_impl=None):
"""Fetch a single message.
:param str queue: Queue name
:param bool no_ack: No acknowledgement needed
:param bool to_dict: Should incoming messages be converted to a
dictionary before delivery.
:param bool auto_decode: Auto-decode strings when possible.
:param class message_impl: Message implementation based on BaseMessage
:raises AMQPInvalidArgument: Invalid Parameters
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:returns: Returns a single message, as long as there is a message in
the queue. If no message is available, returns None.
:rtype: amqpstorm.Message,dict,None
"""
if not compatibility.is_string(queue):
raise AMQPInvalidArgument('queue should be a string')
elif not isinstance(no_ack, bool):
raise AMQPInvalidArgument('no_ack should be a boolean')
elif self._channel.consumer_tags:
raise AMQPChannelError("Cannot call 'get' when channel is "
"set to consume")
if message_impl:
if not issubclass(message_impl, BaseMessage):
raise AMQPInvalidArgument(
'message_impl should be derived from BaseMessage'
)
else:
message_impl = Message
get_frame = specification.Basic.Get(queue=queue,
no_ack=no_ack)
with self._channel.lock and self._channel.rpc.lock:
message = self._get_message(get_frame, auto_decode=auto_decode,
message_impl=message_impl)
if message and to_dict:
return message.to_dict()
return message
def recover(self, requeue=False):
"""Redeliver unacknowledged messages.
:param bool requeue: Re-queue the messages
:raises AMQPInvalidArgument: Invalid Parameters
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:rtype: dict
"""
if not isinstance(requeue, bool):
raise AMQPInvalidArgument('requeue should be a boolean')
recover_frame = specification.Basic.Recover(requeue=requeue)
return self._channel.rpc_request(recover_frame)
def consume(self, callback=None, queue='', consumer_tag='',
exclusive=False, no_ack=False, no_local=False, arguments=None):
"""Start a queue consumer.
:param typing.Callable callback: Message callback
:param str queue: Queue name
:param str consumer_tag: Consumer tag
:param bool no_local: Do not deliver own messages
:param bool no_ack: No acknowledgement needed
:param bool exclusive: Request exclusive access
:param dict arguments: Consume key/value arguments
:raises AMQPInvalidArgument: Invalid Parameters
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:returns: Consumer tag
:rtype: str
"""
if not compatibility.is_string(queue):
raise AMQPInvalidArgument('queue should be a string')
elif not compatibility.is_string(consumer_tag):
raise AMQPInvalidArgument('consumer_tag should be a string')
elif not isinstance(exclusive, bool):
raise AMQPInvalidArgument('exclusive should be a boolean')
elif not isinstance(no_ack, bool):
raise AMQPInvalidArgument('no_ack should be a boolean')
elif not isinstance(no_local, bool):
raise AMQPInvalidArgument('no_local should be a boolean')
elif arguments is not None and not isinstance(arguments, dict):
raise AMQPInvalidArgument('arguments should be a dict or None')
consume_rpc_result = self._consume_rpc_request(arguments, consumer_tag,
exclusive, no_ack,
no_local, queue)
tag = self._consume_add_and_get_tag(consume_rpc_result)
self._channel._consumer_callbacks[tag] = callback
return tag
def cancel(self, consumer_tag=''):
"""Cancel a queue consumer.
:param str consumer_tag: Consumer tag
:raises AMQPInvalidArgument: Invalid Parameters
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:rtype: dict
"""
if not compatibility.is_string(consumer_tag):
raise AMQPInvalidArgument('consumer_tag should be a string')
cancel_frame = specification.Basic.Cancel(consumer_tag=consumer_tag)
result = self._channel.rpc_request(cancel_frame)
self._channel.remove_consumer_tag(consumer_tag)
return result
def publish(self, body, routing_key, exchange='', properties=None,
mandatory=False, immediate=False):
"""Publish a Message.
:param bytes,str,unicode body: Message payload
:param str routing_key: Message routing key
:param str exchange: The exchange to publish the message to
:param dict properties: Message properties
:param bool mandatory: Requires the message is published
:param bool immediate: Request immediate delivery
:raises AMQPInvalidArgument: Invalid Parameters
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:rtype: bool,None
"""
self._validate_publish_parameters(body, exchange, immediate, mandatory,
properties, routing_key)
properties = properties or {}
body = self._handle_utf8_payload(body, properties)
properties = specification.Basic.Properties(**properties)
method_frame = specification.Basic.Publish(exchange=exchange,
routing_key=routing_key,
mandatory=mandatory,
immediate=immediate)
header_frame = pamqp_header.ContentHeader(body_size=len(body),
properties=properties)
frames_out = [method_frame, header_frame]
for body_frame in self._create_content_body(body):
frames_out.append(body_frame)
if self._channel.confirming_deliveries:
with self._channel.rpc.lock:
return self._publish_confirm(frames_out, mandatory)
self._channel.write_frames(frames_out)
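    # Illustrative usage sketch: publishing with publisher confirms,
    # assuming 'channel' is an open amqpstorm channel on which
    # confirm_deliveries() has been enabled; publish() then returns
    # True when the broker acknowledges the message.
    #
    #   channel.confirm_deliveries()
    #   if channel.basic.publish(body='hello world',
    #                            routing_key='my_queue'):
    #       print('message confirmed by the broker')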
def ack(self, delivery_tag=0, multiple=False):
"""Acknowledge Message.
:param int/long delivery_tag: Server-assigned delivery tag
:param bool multiple: Acknowledge multiple messages
:raises AMQPInvalidArgument: Invalid Parameters
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:return:
"""
if not compatibility.is_integer(delivery_tag):
raise AMQPInvalidArgument('delivery_tag should be an integer')
elif not isinstance(multiple, bool):
raise AMQPInvalidArgument('multiple should be a boolean')
ack_frame = specification.Basic.Ack(delivery_tag=delivery_tag,
multiple=multiple)
self._channel.write_frame(ack_frame)
def nack(self, delivery_tag=0, multiple=False, requeue=True):
"""Negative Acknowledgement.
:param int/long delivery_tag: Server-assigned delivery tag
:param bool multiple: Negative acknowledge multiple messages
:param bool requeue: Re-queue the message
:raises AMQPInvalidArgument: Invalid Parameters
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:return:
"""
if not compatibility.is_integer(delivery_tag):
raise AMQPInvalidArgument('delivery_tag should be an integer')
elif not isinstance(multiple, bool):
raise AMQPInvalidArgument('multiple should be a boolean')
elif not isinstance(requeue, bool):
raise AMQPInvalidArgument('requeue should be a boolean')
nack_frame = specification.Basic.Nack(delivery_tag=delivery_tag,
multiple=multiple,
requeue=requeue)
self._channel.write_frame(nack_frame)
def reject(self, delivery_tag=0, requeue=True):
"""Reject Message.
:param int/long delivery_tag: Server-assigned delivery tag
:param bool requeue: Re-queue the message
:raises AMQPInvalidArgument: Invalid Parameters
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:return:
"""
if not compatibility.is_integer(delivery_tag):
raise AMQPInvalidArgument('delivery_tag should be an integer')
elif not isinstance(requeue, bool):
raise AMQPInvalidArgument('requeue should be a boolean')
reject_frame = specification.Basic.Reject(delivery_tag=delivery_tag,
requeue=requeue)
self._channel.write_frame(reject_frame)
def _consume_add_and_get_tag(self, consume_rpc_result):
"""Add the tag to the channel and return it.
:param dict consume_rpc_result:
:rtype: str
"""
consumer_tag = consume_rpc_result['consumer_tag']
self._channel.add_consumer_tag(consumer_tag)
return consumer_tag
def _consume_rpc_request(self, arguments, consumer_tag, exclusive, no_ack,
no_local, queue):
"""Create a Consume Frame and execute a RPC request.
:param str queue: Queue name
:param str consumer_tag: Consumer tag
:param bool no_local: Do not deliver own messages
:param bool no_ack: No acknowledgement needed
:param bool exclusive: Request exclusive access
:param dict arguments: Consume key/value arguments
:rtype: dict
"""
consume_frame = specification.Basic.Consume(queue=queue,
consumer_tag=consumer_tag,
exclusive=exclusive,
no_local=no_local,
no_ack=no_ack,
arguments=arguments)
return self._channel.rpc_request(consume_frame)
@staticmethod
def _validate_publish_parameters(body, exchange, immediate, mandatory,
properties, routing_key):
"""Validate Publish Parameters.
:param bytes,str,unicode body: Message payload
:param str routing_key: Message routing key
:param str exchange: The exchange to publish the message to
:param dict properties: Message properties
        :param bool mandatory: Require the message to be routed to a
            queue; unroutable messages are returned by the server
:param bool immediate: Request immediate delivery
:raises AMQPInvalidArgument: Invalid Parameters
:return:
"""
if not compatibility.is_string(body):
raise AMQPInvalidArgument('body should be a string')
elif not compatibility.is_string(routing_key):
raise AMQPInvalidArgument('routing_key should be a string')
elif not compatibility.is_string(exchange):
raise AMQPInvalidArgument('exchange should be a string')
elif properties is not None and not isinstance(properties, dict):
raise AMQPInvalidArgument('properties should be a dict or None')
elif not isinstance(mandatory, bool):
raise AMQPInvalidArgument('mandatory should be a boolean')
elif not isinstance(immediate, bool):
raise AMQPInvalidArgument('immediate should be a boolean')
@staticmethod
def _handle_utf8_payload(body, properties):
"""Update the Body and Properties to the appropriate encoding.
:param bytes,str,unicode body: Message payload
:param dict properties: Message properties
:return:
"""
if 'content_encoding' not in properties:
properties['content_encoding'] = 'utf-8'
encoding = properties['content_encoding']
if compatibility.is_unicode(body):
body = body.encode(encoding)
elif compatibility.PYTHON3 and isinstance(body, str):
body = bytes(body, encoding=encoding)
return body
def _get_message(self, get_frame, auto_decode, message_impl):
"""Get and return a message using a Basic.Get frame.
:param Basic.Get get_frame:
:param bool auto_decode: Auto-decode strings when possible.
:param class message_impl: Message implementation based on BaseMessage
:rtype: Message
"""
message_uuid = self._channel.rpc.register_request(
get_frame.valid_responses + ['ContentHeader', 'ContentBody']
)
try:
self._channel.write_frame(get_frame)
get_ok_frame = self._channel.rpc.get_request(message_uuid,
raw=True,
multiple=True)
if isinstance(get_ok_frame, specification.Basic.GetEmpty):
return None
content_header = self._channel.rpc.get_request(message_uuid,
raw=True,
multiple=True)
body = self._get_content_body(message_uuid,
content_header.body_size)
finally:
self._channel.rpc.remove(message_uuid)
return message_impl(channel=self._channel,
body=body,
method=dict(get_ok_frame),
properties=dict(content_header.properties),
auto_decode=auto_decode)
def _publish_confirm(self, frames_out, mandatory):
"""Confirm that message was published successfully.
:param list frames_out:
:rtype: bool
"""
confirm_uuid = self._channel.rpc.register_request(['Basic.Ack',
'Basic.Nack'])
self._channel.write_frames(frames_out)
result = self._channel.rpc.get_request(confirm_uuid, raw=True)
if mandatory:
self._channel.check_for_exceptions()
if isinstance(result, specification.Basic.Ack):
return True
return False
def _create_content_body(self, body):
"""Split body based on the maximum frame size.
This function is based on code from Rabbitpy.
https://github.com/gmr/rabbitpy
:param bytes,str,unicode body: Message payload
:rtype: collections.Iterable
"""
frames = int(math.ceil(len(body) / float(self._max_frame_size)))
for offset in compatibility.RANGE(0, frames):
start_frame = self._max_frame_size * offset
end_frame = start_frame + self._max_frame_size
body_len = len(body)
if end_frame > body_len:
end_frame = body_len
yield pamqp_body.ContentBody(body[start_frame:end_frame])
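    # Worked example of the split above (illustrative numbers): with a
    # maximum frame size of 131072 bytes, a 300000 byte payload yields
    # ceil(300000 / 131072.0) == 3 ContentBody frames of 131072, 131072
    # and 37856 bytes respectively.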
def _get_content_body(self, message_uuid, body_size):
"""Get Content Body using RPC requests.
        :param str message_uuid: Rpc Identifier.
        :param int body_size: Content Size.
        :rtype: bytes
"""
body = bytes()
while len(body) < body_size:
body_piece = self._channel.rpc.get_request(message_uuid, raw=True,
multiple=True)
if not body_piece.value:
break
body += body_piece.value
return body | AMQPStorm | /AMQPStorm-2.10.6.tar.gz/AMQPStorm-2.10.6/amqpstorm/basic.py | basic.py |
import logging
import threading
from amqpstorm.exception import AMQPConnectionError
LOGGER = logging.getLogger(__name__)
class Heartbeat(object):
"""Internal Heartbeat handler."""
def __init__(self, interval, send_heartbeat_impl, timer=threading.Timer):
self.send_heartbeat_impl = send_heartbeat_impl
self.timer_impl = timer
self._lock = threading.Lock()
self._running = threading.Event()
self._timer = None
self._exceptions = None
self._reads_since_check = 0
self._writes_since_check = 0
self._interval = interval
self._threshold = 0
def register_read(self):
"""Register that a frame has been received.
:return:
"""
self._reads_since_check += 1
def register_write(self):
"""Register that a frame has been sent.
:return:
"""
self._writes_since_check += 1
def start(self, exceptions):
"""Start the Heartbeat Checker.
:param list exceptions:
        :rtype: bool
"""
if not self._interval:
return False
self._running.set()
with self._lock:
self._threshold = 0
self._reads_since_check = 0
self._writes_since_check = 0
self._exceptions = exceptions
LOGGER.debug('Heartbeat Checker Started')
return self._start_new_timer()
def stop(self):
"""Stop the Heartbeat Checker.
:return:
"""
self._running.clear()
with self._lock:
if self._timer:
self._timer.cancel()
self._timer = None
def _check_for_life_signs(self):
"""Check Connection for life signs.
        First check if any data has been sent; if not, send a heartbeat
        to the remote server.
        If we have not received any data whatsoever within two
        intervals, raise an exception so that the connection can
        be closed.
:rtype: bool
"""
if not self._running.is_set():
return False
if self._writes_since_check == 0:
self.send_heartbeat_impl()
self._lock.acquire()
try:
if self._reads_since_check == 0:
self._threshold += 1
if self._threshold >= 2:
self._running.clear()
self._raise_or_append_exception()
return False
else:
self._threshold = 0
finally:
self._reads_since_check = 0
self._writes_since_check = 0
self._lock.release()
return self._start_new_timer()
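    # Worked example (illustrative): with interval=60 this check runs
    # every 60 seconds. On a silent connection the threshold reaches 1
    # on the first check and 2 on the second, so the connection is
    # declared dead after roughly two intervals (>= 120s) without any
    # inbound data.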
def _raise_or_append_exception(self):
"""The connection is presumably dead and we need to raise or
append an exception.
        If we have a list for exceptions, append the exception and let
        the connection handle it; if not, raise the exception here.
:return:
"""
message = (
'Connection dead, no heartbeat or data received in >= '
'%ds' % (
self._interval * 2
)
)
why = AMQPConnectionError(message)
if self._exceptions is None:
raise why
self._exceptions.append(why)
def _start_new_timer(self):
"""Create a timer that will be used to periodically check the
connection for heartbeats.
        :rtype: bool
"""
if not self._running.is_set():
return False
self._timer = self.timer_impl(
interval=self._interval,
function=self._check_for_life_signs
)
self._timer.daemon = True
self._timer.start()
return True | AMQPStorm | /AMQPStorm-2.10.6.tar.gz/AMQPStorm-2.10.6/amqpstorm/heartbeat.py | heartbeat.py |
import logging
from pamqp.specification import Exchange as pamqp_exchange
from amqpstorm import compatibility
from amqpstorm.base import Handler
from amqpstorm.exception import AMQPInvalidArgument
LOGGER = logging.getLogger(__name__)
class Exchange(Handler):
"""RabbitMQ Exchange Operations."""
__slots__ = []
def declare(self, exchange='', exchange_type='direct', passive=False,
durable=False, auto_delete=False, arguments=None):
"""Declare an Exchange.
:param str exchange: Exchange name
:param str exchange_type: Exchange type
:param bool passive: Do not create
:param bool durable: Durable exchange
:param bool auto_delete: Automatically delete when not in use
:param dict arguments: Exchange key/value arguments
:raises AMQPInvalidArgument: Invalid Parameters
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:rtype: dict
"""
if not compatibility.is_string(exchange):
raise AMQPInvalidArgument('exchange should be a string')
elif not compatibility.is_string(exchange_type):
raise AMQPInvalidArgument('exchange_type should be a string')
elif not isinstance(passive, bool):
raise AMQPInvalidArgument('passive should be a boolean')
elif not isinstance(durable, bool):
raise AMQPInvalidArgument('durable should be a boolean')
elif not isinstance(auto_delete, bool):
raise AMQPInvalidArgument('auto_delete should be a boolean')
elif arguments is not None and not isinstance(arguments, dict):
raise AMQPInvalidArgument('arguments should be a dict or None')
declare_frame = pamqp_exchange.Declare(exchange=exchange,
exchange_type=exchange_type,
passive=passive,
durable=durable,
auto_delete=auto_delete,
arguments=arguments)
return self._channel.rpc_request(declare_frame)
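    # Illustrative usage sketch: declaring a topic exchange and binding
    # it to another exchange, assuming 'channel' is an open amqpstorm
    # channel; all names are placeholders.
    #
    #   channel.exchange.declare(exchange='events',
    #                            exchange_type='topic', durable=True)
    #   channel.exchange.bind(destination='events', source='amq.topic',
    #                         routing_key='orders.#')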
def delete(self, exchange='', if_unused=False):
"""Delete an Exchange.
:param str exchange: Exchange name
:param bool if_unused: Delete only if unused
:raises AMQPInvalidArgument: Invalid Parameters
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:rtype: dict
"""
if not compatibility.is_string(exchange):
raise AMQPInvalidArgument('exchange should be a string')
delete_frame = pamqp_exchange.Delete(exchange=exchange,
if_unused=if_unused)
return self._channel.rpc_request(delete_frame)
def bind(self, destination='', source='', routing_key='',
arguments=None):
"""Bind an Exchange.
:param str destination: Exchange name
:param str source: Exchange to bind to
:param str routing_key: The routing key to use
:param dict arguments: Bind key/value arguments
:raises AMQPInvalidArgument: Invalid Parameters
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:rtype: dict
"""
if not compatibility.is_string(destination):
raise AMQPInvalidArgument('destination should be a string')
elif not compatibility.is_string(source):
raise AMQPInvalidArgument('source should be a string')
elif not compatibility.is_string(routing_key):
raise AMQPInvalidArgument('routing_key should be a string')
elif arguments is not None and not isinstance(arguments, dict):
raise AMQPInvalidArgument('arguments should be a dict or None')
bind_frame = pamqp_exchange.Bind(destination=destination,
source=source,
routing_key=routing_key,
arguments=arguments)
return self._channel.rpc_request(bind_frame)
def unbind(self, destination='', source='', routing_key='',
arguments=None):
"""Unbind an Exchange.
:param str destination: Exchange name
:param str source: Exchange to unbind from
:param str routing_key: The routing key used
:param dict arguments: Unbind key/value arguments
:raises AMQPInvalidArgument: Invalid Parameters
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:rtype: dict
"""
if not compatibility.is_string(destination):
raise AMQPInvalidArgument('destination should be a string')
elif not compatibility.is_string(source):
raise AMQPInvalidArgument('source should be a string')
elif not compatibility.is_string(routing_key):
raise AMQPInvalidArgument('routing_key should be a string')
elif arguments is not None and not isinstance(arguments, dict):
raise AMQPInvalidArgument('arguments should be a dict or None')
unbind_frame = pamqp_exchange.Unbind(destination=destination,
source=source,
routing_key=routing_key,
arguments=arguments)
return self._channel.rpc_request(unbind_frame) | AMQPStorm | /AMQPStorm-2.10.6.tar.gz/AMQPStorm-2.10.6/amqpstorm/exchange.py | exchange.py |
import logging
from pamqp.specification import Queue as pamqp_queue
from amqpstorm import compatibility
from amqpstorm.base import Handler
from amqpstorm.exception import AMQPInvalidArgument
LOGGER = logging.getLogger(__name__)
class Queue(Handler):
"""RabbitMQ Queue Operations."""
__slots__ = []
def declare(self, queue='', passive=False, durable=False,
exclusive=False, auto_delete=False, arguments=None):
"""Declare a Queue.
:param str queue: Queue name
:param bool passive: Do not create
:param bool durable: Durable queue
:param bool exclusive: Request exclusive access
:param bool auto_delete: Automatically delete when not in use
:param dict arguments: Queue key/value arguments
:raises AMQPInvalidArgument: Invalid Parameters
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:rtype: dict
"""
if not compatibility.is_string(queue):
raise AMQPInvalidArgument('queue should be a string')
elif not isinstance(passive, bool):
raise AMQPInvalidArgument('passive should be a boolean')
elif not isinstance(durable, bool):
raise AMQPInvalidArgument('durable should be a boolean')
elif not isinstance(exclusive, bool):
raise AMQPInvalidArgument('exclusive should be a boolean')
elif not isinstance(auto_delete, bool):
raise AMQPInvalidArgument('auto_delete should be a boolean')
elif arguments is not None and not isinstance(arguments, dict):
raise AMQPInvalidArgument('arguments should be a dict or None')
declare_frame = pamqp_queue.Declare(queue=queue,
passive=passive,
durable=durable,
exclusive=exclusive,
auto_delete=auto_delete,
arguments=arguments)
return self._channel.rpc_request(declare_frame)
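    # Illustrative usage sketch: declaring a durable queue and binding
    # it to an existing exchange, assuming 'channel' is an open
    # amqpstorm channel; all names are placeholders.
    #
    #   channel.queue.declare('my_queue', durable=True)
    #   channel.queue.bind(queue='my_queue', exchange='events',
    #                      routing_key='orders.#')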
def delete(self, queue='', if_unused=False, if_empty=False):
"""Delete a Queue.
:param str queue: Queue name
:param bool if_unused: Delete only if unused
:param bool if_empty: Delete only if empty
:raises AMQPInvalidArgument: Invalid Parameters
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:rtype: dict
"""
if not compatibility.is_string(queue):
raise AMQPInvalidArgument('queue should be a string')
elif not isinstance(if_unused, bool):
raise AMQPInvalidArgument('if_unused should be a boolean')
elif not isinstance(if_empty, bool):
raise AMQPInvalidArgument('if_empty should be a boolean')
delete_frame = pamqp_queue.Delete(queue=queue, if_unused=if_unused,
if_empty=if_empty)
return self._channel.rpc_request(delete_frame)
def purge(self, queue):
"""Purge a Queue.
:param str queue: Queue name
:raises AMQPInvalidArgument: Invalid Parameters
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:rtype: dict
"""
if not compatibility.is_string(queue):
raise AMQPInvalidArgument('queue should be a string')
purge_frame = pamqp_queue.Purge(queue=queue)
return self._channel.rpc_request(purge_frame)
def bind(self, queue='', exchange='', routing_key='', arguments=None):
"""Bind a Queue.
:param str queue: Queue name
:param str exchange: Exchange name
:param str routing_key: The routing key to use
:param dict arguments: Bind key/value arguments
:raises AMQPInvalidArgument: Invalid Parameters
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:rtype: dict
"""
if not compatibility.is_string(queue):
raise AMQPInvalidArgument('queue should be a string')
elif not compatibility.is_string(exchange):
raise AMQPInvalidArgument('exchange should be a string')
elif not compatibility.is_string(routing_key):
raise AMQPInvalidArgument('routing_key should be a string')
elif arguments is not None and not isinstance(arguments, dict):
raise AMQPInvalidArgument('arguments should be a dict or None')
bind_frame = pamqp_queue.Bind(queue=queue,
exchange=exchange,
routing_key=routing_key,
arguments=arguments)
return self._channel.rpc_request(bind_frame)
def unbind(self, queue='', exchange='', routing_key='', arguments=None):
"""Unbind a Queue.
:param str queue: Queue name
:param str exchange: Exchange name
:param str routing_key: The routing key used
:param dict arguments: Unbind key/value arguments
:raises AMQPInvalidArgument: Invalid Parameters
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:rtype: dict
"""
if not compatibility.is_string(queue):
raise AMQPInvalidArgument('queue should be a string')
elif not compatibility.is_string(exchange):
raise AMQPInvalidArgument('exchange should be a string')
elif not compatibility.is_string(routing_key):
raise AMQPInvalidArgument('routing_key should be a string')
elif arguments is not None and not isinstance(arguments, dict):
raise AMQPInvalidArgument('arguments should be a dict or None')
unbind_frame = pamqp_queue.Unbind(queue=queue,
exchange=exchange,
routing_key=routing_key,
arguments=arguments)
return self._channel.rpc_request(unbind_frame) | AMQPStorm | /AMQPStorm-2.10.6.tar.gz/AMQPStorm-2.10.6/amqpstorm/queue.py | queue.py |
AMQP_ERROR_MAPPING = {
311: ('CONTENT-TOO-LARGE',
'The client attempted to transfer content larger than the '
'server could accept at the present time. The client may '
'retry at a later time.'),
312: ('NO-ROUTE', 'Undocumented AMQP Soft Error'),
313: ('NO-CONSUMERS',
'When the exchange cannot deliver to a consumer when the '
'immediate flag is set. As a result of pending data on '
'the queue or the absence of any consumers of the queue.'),
320: ('CONNECTION-FORCED',
'An operator intervened to close the connection for some reason. '
'The client may retry at some later date.'),
402: ('INVALID-PATH',
'The client tried to work with an unknown virtual host.'),
403: ('ACCESS-REFUSED',
'The client attempted to work with a server entity to which '
'has no access due to security settings.'),
404: ('NOT-FOUND',
'The client attempted to work with a server '
'entity that does not exist.'),
405: ('RESOURCE-LOCKED',
'The client attempted to work with a server entity to which it '
'has no access because another client is working with it.'),
406: ('PRECONDITION-FAILED',
'The client requested a method that was not '
'allowed because some precondition failed.'),
501: ('FRAME-ERROR',
'The sender sent a malformed frame that the recipient could '
'not decode. This strongly implies a programming error in '
'the sending peer.'),
502: ('SYNTAX-ERROR',
'The sender sent a frame that contained illegal values for '
'one or more fields. This strongly implies a programming '
'error in the sending peer.'),
503: ('COMMAND-INVALID',
'The client sent an invalid sequence of frames, attempting to '
'perform an operation that was considered invalid by the server. '
'This usually implies a programming error in the client.'),
504: ('CHANNEL-ERROR',
'The client attempted to work with a channel that had not '
'been correctly opened. This most likely indicates a '
'fault in the client layer.'),
505: ('UNEXPECTED-FRAME',
'The peer sent a frame that was not expected, usually in the '
'context of a content header and body. This strongly '
'indicates a fault in the peer\'s content processing.'),
506: ('RESOURCE-ERROR',
'The server could not complete the method because it lacked '
'sufficient resources. This may be due to the client '
'creating too many of some type of entity.'),
530: ('NOT-ALLOWED',
'The client tried to work with some entity in a manner '
'that is prohibited by the server, due to security '
'settings or by some other criteria.'),
540: ('NOT-IMPLEMENTED',
'The client tried to use functionality that is '
          'not implemented in the server.'),
541: ('INTERNAL-ERROR',
'The server could not complete the method because of an '
'internal error. The server may require intervention by '
'an operator in order to resume normal operations.')
}
class AMQPError(IOError):
"""General AMQP Error.
    Exceptions raised by AMQPStorm are mapped to the
    AMQP 0.9.1 specification (when applicable).
e.g.
::
except AMQPChannelError as why:
if why.error_code == 312:
self.channel.queue.declare(queue_name)
"""
_documentation = None
_error_code = None
_error_type = None
@property
def documentation(self):
"""AMQP Documentation string."""
return self._documentation or bytes()
@property
def error_code(self):
"""AMQP Error Code - A 3-digit reply code."""
return self._error_code
@property
def error_type(self):
"""AMQP Error Type e.g. NOT-FOUND."""
return self._error_type
def __init__(self, *args, **kwargs):
self._error_code = kwargs.pop('reply_code', None)
super(AMQPError, self).__init__(*args, **kwargs)
if self._error_code not in AMQP_ERROR_MAPPING:
return
self._error_type = AMQP_ERROR_MAPPING[self._error_code][0]
self._documentation = AMQP_ERROR_MAPPING[self._error_code][1]
class AMQPConnectionError(AMQPError):
"""AMQP Connection Error."""
pass
class AMQPChannelError(AMQPError):
"""AMQP Channel Error."""
pass
class AMQPMessageError(AMQPChannelError):
"""AMQP Message Error."""
pass
class AMQPInvalidArgument(AMQPError):
"""AMQP Argument Error.""" | AMQPStorm | /AMQPStorm-2.10.6.tar.gz/AMQPStorm-2.10.6/amqpstorm/exception.py | exception.py |
import logging
from pamqp import specification
from amqpstorm.base import Handler
LOGGER = logging.getLogger(__name__)
class Tx(Handler):
"""RabbitMQ Transactions.
Server local transactions, in which the server will buffer published
    messages until the client commits (or rolls back) the messages.
"""
__slots__ = ['_tx_active']
def __init__(self, channel):
self._tx_active = True
super(Tx, self).__init__(channel)
def __enter__(self):
self.select()
return self
def __exit__(self, exception_type, exception_value, _):
if exception_type:
LOGGER.warning(
'Leaving Transaction on exception: %s',
exception_value
)
if self._tx_active:
self.rollback()
return
if self._tx_active:
self.commit()
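    # Illustrative usage sketch, assuming the channel exposes this
    # handler as 'channel.tx': select() is called on enter, commit()
    # runs on a clean exit and rollback() if the block raises.
    #
    #   with channel.tx:
    #       channel.basic.publish(body='hello', routing_key='my_queue')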
def select(self):
"""Enable standard transaction mode.
This will enable transaction mode on the channel. Meaning that
messages will be kept in the remote server buffer until such a
time that either commit or rollback is called.
:return:
"""
self._tx_active = True
return self._channel.rpc_request(specification.Tx.Select())
def commit(self):
"""Commit the current transaction.
Commit all messages published during the current transaction
session to the remote server.
A new transaction session starts as soon as the command has
been executed.
:return:
"""
self._tx_active = False
return self._channel.rpc_request(specification.Tx.Commit())
def rollback(self):
"""Abandon the current transaction.
        Roll back all messages published to the remote server during
        the current transaction session.
Note that all messages published during this transaction session
will be lost, and will have to be published again.
A new transaction session starts as soon as the command has
been executed.
:return:
"""
self._tx_active = False
return self._channel.rpc_request(specification.Tx.Rollback()) | AMQPStorm | /AMQPStorm-2.10.6.tar.gz/AMQPStorm-2.10.6/amqpstorm/tx.py | tx.py |
import logging
import select
import socket
import threading
from errno import EAGAIN
from errno import EINTR
from errno import EWOULDBLOCK
from amqpstorm import compatibility
from amqpstorm.base import MAX_FRAME_SIZE
from amqpstorm.compatibility import ssl
from amqpstorm.exception import AMQPConnectionError
EMPTY_BUFFER = bytes()
LOGGER = logging.getLogger(__name__)
POLL_TIMEOUT = 1.0
class Poller(object):
"""Socket Read Poller."""
def __init__(self, fileno, exceptions, timeout=5):
self.select = select
self._fileno = fileno
self._exceptions = exceptions
self.timeout = timeout
@property
def fileno(self):
"""Socket Fileno.
:return:
"""
return self._fileno
@property
def is_ready(self):
"""Is Socket Ready.
        :rtype: bool
"""
try:
ready, _, _ = self.select.select([self.fileno], [], [],
POLL_TIMEOUT)
return bool(ready)
except self.select.error as why:
if why.args[0] != EINTR:
self._exceptions.append(AMQPConnectionError(why))
return False
class IO(object):
"""Internal Input/Output handler."""
def __init__(self, parameters, exceptions=None, on_read_impl=None):
self._exceptions = exceptions
self._wr_lock = threading.Lock()
self._rd_lock = threading.Lock()
self._inbound_thread = None
self._on_read_impl = on_read_impl
self._running = threading.Event()
self._parameters = parameters
self.data_in = EMPTY_BUFFER
self.poller = None
self.socket = None
self.use_ssl = self._parameters['ssl']
def close(self):
"""Close Socket.
:return:
"""
self._wr_lock.acquire()
self._rd_lock.acquire()
try:
self._running.clear()
self._close_socket()
finally:
self._wr_lock.release()
self._rd_lock.release()
if self._inbound_thread:
self._inbound_thread.join(timeout=self._parameters['timeout'])
self.socket = None
self.poller = None
self._inbound_thread = None
def open(self):
"""Open Socket and establish a connection.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:return:
"""
self._wr_lock.acquire()
self._rd_lock.acquire()
try:
self.data_in = EMPTY_BUFFER
self._running.set()
sock_addresses = self._get_socket_addresses()
self.socket = self._find_address_and_connect(sock_addresses)
self.poller = Poller(self.socket.fileno(), self._exceptions,
timeout=self._parameters['timeout'])
self._inbound_thread = self._create_inbound_thread()
finally:
self._wr_lock.release()
self._rd_lock.release()
def write_to_socket(self, frame_data):
"""Write data to the socket.
        :param bytes frame_data:
:return:
"""
self._wr_lock.acquire()
try:
total_bytes_written = 0
bytes_to_send = len(frame_data)
while total_bytes_written < bytes_to_send:
try:
if not self.socket:
raise socket.error('connection/socket error')
bytes_written = (
self.socket.send(frame_data[total_bytes_written:])
)
if bytes_written == 0:
raise socket.error('connection/socket error')
total_bytes_written += bytes_written
except socket.timeout:
pass
except socket.error as why:
if why.args[0] in (EWOULDBLOCK, EAGAIN):
continue
self._exceptions.append(AMQPConnectionError(why))
return
finally:
self._wr_lock.release()
def _close_socket(self):
"""Shutdown and close the Socket.
:return:
"""
if not self.socket:
return
try:
if self.use_ssl:
self.socket.unwrap()
self.socket.shutdown(socket.SHUT_RDWR)
except (OSError, socket.error, ValueError):
pass
self.socket.close()
def _get_socket_addresses(self):
"""Get Socket address information.
:rtype: list
"""
family = socket.AF_UNSPEC
if not socket.has_ipv6:
family = socket.AF_INET
try:
addresses = socket.getaddrinfo(self._parameters['hostname'],
self._parameters['port'], family,
socket.SOCK_STREAM)
except socket.gaierror as why:
raise AMQPConnectionError(why)
return addresses
def _find_address_and_connect(self, addresses):
"""Find and connect to the appropriate address.
:param addresses:
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:rtype: socket.socket
"""
error_message = None
for address in addresses:
sock = self._create_socket(socket_family=address[0])
try:
sock.connect(address[4])
except (IOError, OSError) as why:
error_message = why.strerror
continue
return sock
raise AMQPConnectionError(
'Could not connect to %s:%d error: %s' % (
self._parameters['hostname'], self._parameters['port'],
error_message
)
)
def _create_socket(self, socket_family):
"""Create Socket.
:param int socket_family:
:rtype: socket.socket
"""
sock = socket.socket(socket_family, socket.SOCK_STREAM, 0)
sock.settimeout(self._parameters['timeout'] or None)
if self.use_ssl:
if not compatibility.SSL_SUPPORTED:
raise AMQPConnectionError(
'Python not compiled with support for TLSv1 or higher'
)
sock = self._ssl_wrap_socket(sock)
return sock
def _ssl_wrap_socket(self, sock):
"""Wrap SSLSocket around the Socket.
:param socket.socket sock:
:rtype: SSLSocket
"""
context = self._parameters['ssl_options'].get('context')
if context is not None:
hostname = self._parameters['ssl_options'].get('server_hostname')
return context.wrap_socket(
sock, do_handshake_on_connect=True,
server_hostname=hostname
)
hostname = self._parameters['hostname']
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
mode = self._parameters['ssl_options'].get('verify_mode', 'none')
if mode.lower() == 'required':
context.verify_mode = ssl.CERT_REQUIRED
else:
context.verify_mode = ssl.CERT_NONE
check = self._parameters['ssl_options'].get('check_hostname', False)
context.check_hostname = check
context.load_default_certs()
return context.wrap_socket(sock, do_handshake_on_connect=True,
server_hostname=hostname)
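    # Illustrative sketch of the 'ssl_options' shape read above when a
    # pre-built context is supplied; the file name and hostname are
    # placeholders.
    #
    #   import ssl
    #   ssl_options = {
    #       'context': ssl.create_default_context(cafile='ca.pem'),
    #       'server_hostname': 'rmq.example.com',
    #   }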
def _create_inbound_thread(self):
"""Internal Thread that handles all incoming traffic.
:rtype: threading.Thread
"""
inbound_thread = threading.Thread(target=self._process_incoming_data,
name=__name__)
inbound_thread.daemon = True
inbound_thread.start()
return inbound_thread
def _process_incoming_data(self):
"""Retrieve and process any incoming data.
:return:
"""
while self._running.is_set():
if self.poller.is_ready:
self.data_in += self._receive()
self.data_in = self._on_read_impl(self.data_in)
def _receive(self):
"""Receive any incoming socket data.
        If an error is thrown, handle it and return an empty byte string.
:return: data_in
:rtype: bytes
"""
data_in = EMPTY_BUFFER
try:
data_in = self._read_from_socket()
if len(data_in) == 0:
raise socket.error("connection closed by server")
except socket.timeout:
pass
except compatibility.SSLWantReadError:
# NOTE(visobet): Retry if the non-blocking socket does not
# have any meaningful data ready.
pass
except (IOError, OSError, ValueError) as why:
if why.args[0] not in (EWOULDBLOCK, EAGAIN):
self._exceptions.append(AMQPConnectionError(why))
if self._running.is_set():
LOGGER.warning(
"Stopping inbound thread due to %s", why,
exc_info=True
)
self._running.clear()
return data_in
def _read_from_socket(self):
"""Read data from the socket.
:rtype: bytes
"""
if not self.use_ssl:
if not self.socket:
raise socket.error('connection/socket error')
return self.socket.recv(MAX_FRAME_SIZE)
with self._rd_lock:
if not self.socket:
raise socket.error('connection/socket error')
return self.socket.read(MAX_FRAME_SIZE) | AMQPStorm | /AMQPStorm-2.10.6.tar.gz/AMQPStorm-2.10.6/amqpstorm/io.py | io.py |
import logging
from amqpstorm import compatibility
from amqpstorm.compatibility import ssl
from amqpstorm.compatibility import urlparse
from amqpstorm.connection import Connection
from amqpstorm.connection import DEFAULT_HEARTBEAT_INTERVAL
from amqpstorm.connection import DEFAULT_SOCKET_TIMEOUT
from amqpstorm.connection import DEFAULT_VIRTUAL_HOST
from amqpstorm.exception import AMQPConnectionError
LOGGER = logging.getLogger(__name__)
class UriConnection(Connection):
"""RabbitMQ Connection that takes a Uri string.
e.g.
::
import amqpstorm
connection = amqpstorm.UriConnection(
'amqp://guest:guest@localhost:5672/%2F?heartbeat=60'
)
Using a SSL Context:
::
import ssl
import amqpstorm
ssl_options = {
'context': ssl.create_default_context(cafile='ca_certificate.pem'),
'server_hostname': 'rmq.amqpstorm.io'
}
connection = amqpstorm.UriConnection(
'amqps://guest:[email protected]:5671/%2F?heartbeat=60',
ssl_options=ssl_options
)
:param str uri: AMQP Connection string
:param dict ssl_options: SSL kwargs
:param dict client_properties: None or dict of client properties
:param bool lazy: Lazy initialize the connection
:raises TypeError: Raises on invalid uri.
:raises ValueError: Raises on invalid uri.
:raises AttributeError: Raises on invalid uri.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
"""
__slots__ = []
def __init__(self, uri, ssl_options=None, client_properties=None,
lazy=False):
uri = compatibility.patch_uri(uri)
parsed_uri = urlparse.urlparse(uri)
use_ssl = parsed_uri.scheme == 'amqps' or parsed_uri.scheme == 'https'
hostname = parsed_uri.hostname or 'localhost'
port = parsed_uri.port or (5671 if use_ssl else 5672)
username = urlparse.unquote(parsed_uri.username or 'guest')
password = urlparse.unquote(parsed_uri.password or 'guest')
kwargs = self._parse_uri_options(parsed_uri, use_ssl, ssl_options)
super(UriConnection, self).__init__(
hostname, username, password, port,
client_properties=client_properties,
lazy=lazy,
**kwargs
)
def _parse_uri_options(self, parsed_uri, use_ssl=False, ssl_options=None):
"""Parse the uri options.
:param parsed_uri:
:param bool use_ssl:
:return:
"""
ssl_options = ssl_options or {}
kwargs = urlparse.parse_qs(parsed_uri.query)
vhost = urlparse.unquote(parsed_uri.path[1:]) or DEFAULT_VIRTUAL_HOST
options = {
'ssl': use_ssl,
'virtual_host': vhost,
'heartbeat': int(kwargs.pop('heartbeat',
[DEFAULT_HEARTBEAT_INTERVAL])[0]),
'timeout': int(kwargs.pop('timeout',
[DEFAULT_SOCKET_TIMEOUT])[0])
}
if use_ssl:
if not compatibility.SSL_SUPPORTED:
raise AMQPConnectionError(
'Python not compiled with support '
'for TLSv1 or higher'
)
ssl_options.update(self._parse_ssl_options(kwargs))
options['ssl_options'] = ssl_options
return options
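    # Worked example (illustrative): the uri
    # 'amqp://guest:guest@localhost:5672/%2F?heartbeat=120&timeout=30'
    # parses into virtual_host='/', heartbeat=120, timeout=30 and
    # ssl=False, with the defaults applied for any missing query value.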
def _parse_ssl_options(self, ssl_kwargs):
"""Parse TLS Options.
:param ssl_kwargs:
:rtype: dict
"""
ssl_options = {}
for key in ssl_kwargs:
if key not in compatibility.SSL_OPTIONS:
LOGGER.warning('invalid option: %s', key)
continue
if 'ssl_version' in key:
value = self._get_ssl_version(ssl_kwargs[key][0])
elif 'cert_reqs' in key:
value = self._get_ssl_validation(ssl_kwargs[key][0])
else:
value = ssl_kwargs[key][0]
ssl_options[key] = value
return ssl_options
def _get_ssl_version(self, value):
"""Get the TLS Version.
:param str value:
:return: TLS Version
"""
return self._get_ssl_attribute(value, compatibility.SSL_VERSIONS,
ssl.PROTOCOL_TLSv1,
'ssl_options: ssl_version \'%s\' not '
'found falling back to PROTOCOL_TLSv1.')
def _get_ssl_validation(self, value):
"""Get the TLS Validation option.
:param str value:
:return: TLS Certificate Options
"""
return self._get_ssl_attribute(value, compatibility.SSL_CERT_MAP,
ssl.CERT_NONE,
'ssl_options: cert_reqs \'%s\' not '
'found falling back to CERT_NONE.')
@staticmethod
def _get_ssl_attribute(value, mapping, default_value, warning_message):
"""Get the TLS attribute based on the compatibility mapping.
If no valid attribute can be found, fall-back on default and
display a warning.
:param str value:
:param dict mapping: Dictionary based mapping
:param default_value: Default fall-back value
:param str warning_message: Warning message
:return:
"""
for key in mapping:
if not key.endswith(value.lower()):
continue
return mapping[key]
LOGGER.warning(warning_message, value)
return default_value | AMQPStorm | /AMQPStorm-2.10.6.tar.gz/AMQPStorm-2.10.6/amqpstorm/uri_connection.py | uri_connection.py |
from amqpstorm.compatibility import json
from amqpstorm.compatibility import quote
from amqpstorm.management.base import ManagementHandler
API_CONNECTION = 'connections/%s'
API_CONNECTIONS = 'connections'
class Connection(ManagementHandler):
def get(self, connection):
"""Get Connection details.
:param str connection: Connection name
:raises ApiError: Raises if the remote server encountered an error.
We also raise an exception if the connection cannot
be found.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: dict
"""
return self.http_client.get(API_CONNECTION % connection)
def list(self, name=None, page_size=100, use_regex=False):
"""Get Connections.
:param name: Filter by name
:param use_regex: Enables regular expression for the param name
:param page_size: Number of elements per page
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: list
"""
return self.http_client.list(
API_CONNECTIONS,
name=name, use_regex=use_regex, page_size=page_size,
)
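    # Illustrative usage sketch: listing and closing connections through
    # the management api, assuming 'client' is a ManagementApi instance.
    #
    #   for conn in client.connection.list(page_size=100):
    #       client.connection.close(conn['name'], reason='maintenance')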
def close(self, connection, reason='Closed via management api'):
"""Close Connection.
:param str connection: Connection name
:param str reason: Reason for closing connection.
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: None
"""
close_payload = json.dumps({
'name': connection,
'reason': reason
})
connection = quote(connection, '')
return self.http_client.delete(API_CONNECTION % connection,
payload=close_payload,
headers={
'X-Reason': reason
}) | AMQPStorm | /AMQPStorm-2.10.6.tar.gz/AMQPStorm-2.10.6/amqpstorm/management/connection.py | connection.py |
from amqpstorm.compatibility import json
from amqpstorm.compatibility import quote
from amqpstorm.management.base import ManagementHandler
from amqpstorm.message import Message
API_BASIC_GET_MESSAGE = 'queues/%s/%s/get'
API_BASIC_PUBLISH = 'exchanges/%s/%s/publish'
class Basic(ManagementHandler):
def publish(self, body, routing_key, exchange='amq.default',
virtual_host='/', properties=None, payload_encoding='string'):
"""Publish a Message.
:param bytes,str,unicode body: Message payload
:param str routing_key: Message routing key
:param str exchange: The exchange to publish the message to
:param str virtual_host: Virtual host name
:param dict properties: Message properties
:param str payload_encoding: Payload encoding.
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: dict
"""
exchange = quote(exchange, '')
properties = properties or {}
body = json.dumps(
{
'routing_key': routing_key,
'payload': body,
'payload_encoding': payload_encoding,
'properties': properties,
'vhost': virtual_host
}
)
virtual_host = quote(virtual_host, '')
return self.http_client.post(API_BASIC_PUBLISH %
(
virtual_host,
exchange),
payload=body)
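    # Illustrative usage sketch: publishing through the management api,
    # assuming 'client' is a ManagementApi instance and 'my_queue'
    # exists; the default exchange routes directly to the queue named
    # by the routing key.
    #
    #   client.basic.publish(body='hello world', routing_key='my_queue')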
def get(self, queue, virtual_host='/', requeue=False, to_dict=False,
count=1, truncate=50000, encoding='auto'):
"""Get Messages.
:param str queue: Queue name
:param str virtual_host: Virtual host name
:param bool requeue: Re-queue message
:param bool to_dict: Should incoming messages be converted to a
dictionary before delivery.
:param int count: How many messages should we try to fetch.
        :param int truncate: The maximum length in bytes, beyond which the
            server will truncate the message.
:param str encoding: Message encoding.
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: list
"""
ackmode = 'ack_requeue_false'
if requeue:
ackmode = 'ack_requeue_true'
get_messages = json.dumps(
{
'count': count,
'requeue': requeue,
'ackmode': ackmode,
'encoding': encoding,
'truncate': truncate,
'vhost': virtual_host
}
)
virtual_host = quote(virtual_host, '')
response = self.http_client.post(API_BASIC_GET_MESSAGE %
(
virtual_host,
queue
),
payload=get_messages)
if to_dict:
return response
messages = []
for message in response:
body = message.get('body')
if not body:
body = message.get('payload')
messages.append(Message(
channel=None,
body=body,
properties=message.get('properties'),
auto_decode=True,
))
return messages | AMQPStorm | /AMQPStorm-2.10.6.tar.gz/AMQPStorm-2.10.6/amqpstorm/management/basic.py | basic.py |
from amqpstorm.compatibility import quote
from amqpstorm.management.base import ManagementHandler
API_VIRTUAL_HOST = 'vhosts/%s'
API_VIRTUAL_HOSTS = 'vhosts'
API_VIRTUAL_HOSTS_PERMISSION = 'vhosts/%s/permissions'
class VirtualHost(ManagementHandler):
def get(self, virtual_host):
"""Get Virtual Host details.
:param str virtual_host: Virtual host name
:raises ApiError: Raises if the remote server encountered an error.
We also raise an exception if the virtual host cannot
be found.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: dict
"""
virtual_host = quote(virtual_host, '')
return self.http_client.get(API_VIRTUAL_HOST % virtual_host)
def list(self):
"""List all Virtual Hosts.
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: list
"""
return self.http_client.get(API_VIRTUAL_HOSTS)
def create(self, virtual_host):
"""Create a Virtual Host.
:param str virtual_host: Virtual host name
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: dict
"""
virtual_host = quote(virtual_host, '')
return self.http_client.put(API_VIRTUAL_HOST % virtual_host)
def delete(self, virtual_host):
"""Delete a Virtual Host.
:param str virtual_host: Virtual host name
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: dict
"""
virtual_host = quote(virtual_host, '')
return self.http_client.delete(API_VIRTUAL_HOST % virtual_host)
def get_permissions(self, virtual_host):
"""Get all Virtual hosts permissions.
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: dict
"""
virtual_host = quote(virtual_host, '')
return self.http_client.get(API_VIRTUAL_HOSTS_PERMISSION %
(
virtual_host
)) | AMQPStorm | /AMQPStorm-2.10.6.tar.gz/AMQPStorm-2.10.6/amqpstorm/management/virtual_host.py | virtual_host.py |
from amqpstorm.compatibility import json
from amqpstorm.compatibility import quote
from amqpstorm.management.base import ManagementHandler
API_EXCHANGE = 'exchanges/%s/%s'
API_EXCHANGE_BIND = 'bindings/%s/e/%s/e/%s'
API_EXCHANGE_BINDINGS = 'exchanges/%s/%s/bindings/source'
API_EXCHANGE_UNBIND = 'bindings/%s/e/%s/e/%s/%s'
API_EXCHANGES = 'exchanges'
API_EXCHANGES_VIRTUAL_HOST = 'exchanges/%s'
class Exchange(ManagementHandler):
def get(self, exchange, virtual_host='/'):
"""Get Exchange details.
:param str exchange: Exchange name
:param str virtual_host: Virtual host name
        :raises ApiError: Raises if the remote server encountered an error.
            We also raise an exception if the exchange cannot be found.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: dict
"""
virtual_host = quote(virtual_host, '')
return self.http_client.get(
API_EXCHANGE
% (
virtual_host,
exchange)
)
def list(self, virtual_host='/', show_all=False,
name=None, page_size=100, use_regex=False):
"""List Exchanges.
:param str virtual_host: Virtual host name
:param bool show_all: List Exchanges across all virtual hosts
        :param str,None name: Filter by name
        :param bool use_regex: Treat the name filter as a regular expression
        :param int page_size: Number of elements per page
        :raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: list
"""
if show_all:
return self.http_client.list(
API_EXCHANGES,
name=name, use_regex=use_regex, page_size=page_size,
)
virtual_host = quote(virtual_host, '')
return self.http_client.list(
API_EXCHANGES_VIRTUAL_HOST % virtual_host,
name=name, use_regex=use_regex, page_size=page_size,
)
def declare(self, exchange='', exchange_type='direct', virtual_host='/',
passive=False, durable=False, auto_delete=False,
internal=False, arguments=None):
"""Declare an Exchange.
:param str exchange: Exchange name
:param str exchange_type: Exchange type
:param str virtual_host: Virtual host name
:param bool passive: Do not create
:param bool durable: Durable exchange
:param bool auto_delete: Automatically delete when not in use
:param bool internal: Is the exchange for use by the broker only.
:param dict,None arguments: Exchange key/value arguments
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: None
"""
if passive:
return self.get(exchange, virtual_host=virtual_host)
exchange_payload = json.dumps(
{
'durable': durable,
'auto_delete': auto_delete,
'internal': internal,
'type': exchange_type,
'arguments': arguments or {},
'vhost': virtual_host
}
)
return self.http_client.put(API_EXCHANGE %
(
quote(virtual_host, ''),
exchange
),
payload=exchange_payload)
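    # Illustrative usage sketch: declaring an exchange through the
    # management api, assuming 'client' is a ManagementApi instance;
    # with passive=True the call only verifies that the exchange exists.
    #
    #   client.exchange.declare('events', exchange_type='topic',
    #                           durable=True)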
def delete(self, exchange, virtual_host='/'):
"""Delete an Exchange.
:param str exchange: Exchange name
:param str virtual_host: Virtual host name
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: dict
"""
virtual_host = quote(virtual_host, '')
return self.http_client.delete(API_EXCHANGE %
(
virtual_host,
exchange
))
def bindings(self, exchange, virtual_host='/'):
"""Get Exchange bindings.
:param str exchange: Exchange name
:param str virtual_host: Virtual host name
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: list
"""
virtual_host = quote(virtual_host, '')
return self.http_client.get(API_EXCHANGE_BINDINGS %
(
virtual_host,
exchange
))
def bind(self, destination='', source='', routing_key='', virtual_host='/',
arguments=None):
"""Bind an Exchange.
:param str source: Source Exchange name
:param str destination: Destination Exchange name
:param str routing_key: The routing key to use
:param str virtual_host: Virtual host name
:param dict,None arguments: Bind key/value arguments
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: None
"""
bind_payload = json.dumps({
'destination': destination,
'destination_type': 'e',
'routing_key': routing_key,
'source': source,
'arguments': arguments or {},
'vhost': virtual_host
})
virtual_host = quote(virtual_host, '')
return self.http_client.post(API_EXCHANGE_BIND %
(
virtual_host,
source,
destination
),
payload=bind_payload)
def unbind(self, destination='', source='', routing_key='',
virtual_host='/', properties_key=None):
"""Unbind an Exchange.
:param str source: Source Exchange name
:param str destination: Destination Exchange name
:param str routing_key: The routing key to use
:param str virtual_host: Virtual host name
:param str properties_key:
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: None
"""
unbind_payload = json.dumps({
'destination': destination,
'destination_type': 'e',
'properties_key': properties_key or routing_key,
'source': source,
'vhost': virtual_host
})
virtual_host = quote(virtual_host, '')
return self.http_client.delete(API_EXCHANGE_UNBIND %
(
virtual_host,
source,
destination,
properties_key or routing_key
),
payload=unbind_payload) | AMQPStorm | /AMQPStorm-2.10.6.tar.gz/AMQPStorm-2.10.6/amqpstorm/management/exchange.py | exchange.py |
from amqpstorm.compatibility import json
from amqpstorm.compatibility import quote
from amqpstorm.management.base import ManagementHandler
API_USER = 'users/%s'
API_USER_PERMISSIONS = 'users/%s/permissions'
API_USER_VIRTUAL_HOST_PERMISSIONS = 'permissions/%s/%s'
API_USERS = 'users'
API_USERS_BULK_DELETE = 'users/bulk-delete'
class User(ManagementHandler):
def get(self, username):
"""Get User details.
:param str username: Username
:raises ApiError: Raises if the remote server encountered an error.
We also raise an exception if the user cannot
be found.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: dict
"""
return self.http_client.get(API_USER % username)
def list(self):
"""List all Users.
:rtype: list
"""
return self.http_client.get(API_USERS)
def create(self, username, password, tags=''):
"""Create User.
:param str username: Username
:param str password: Password
        :param str,list tags: Comma-separated list of tags (e.g. monitoring)
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: None
"""
user_payload = json.dumps({
'password': password,
'tags': tags
})
return self.http_client.put(API_USER % username,
payload=user_payload)
def delete(self, username):
"""Delete User or a list of Users.
:param str,list username: Username or a list of Usernames
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: dict
"""
if isinstance(username, list):
return self.http_client.post(
API_USERS_BULK_DELETE,
payload=json.dumps({'users': username})
)
return self.http_client.delete(API_USER % username)
def get_permission(self, username, virtual_host):
"""Get User permissions for the configured virtual host.
:param str username: Username
:param str virtual_host: Virtual host name
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: dict
"""
virtual_host = quote(virtual_host, '')
return self.http_client.get(API_USER_VIRTUAL_HOST_PERMISSIONS %
(
virtual_host,
username
))
def get_permissions(self, username):
"""Get all Users permissions.
:param str username: Username
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: dict
"""
return self.http_client.get(API_USER_PERMISSIONS %
(
username
))
def set_permission(self, username, virtual_host, configure_regex='.*',
write_regex='.*', read_regex='.*'):
"""Set User permissions for the configured virtual host.
:param str username: Username
:param str virtual_host: Virtual host name
:param str configure_regex: Permission pattern for configuration
operations for this user.
:param str write_regex: Permission pattern for write operations
for this user.
:param str read_regex: Permission pattern for read operations
for this user.
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: dict
"""
virtual_host = quote(virtual_host, '')
permission_payload = json.dumps({
"configure": configure_regex,
"read": read_regex,
"write": write_regex
})
return self.http_client.put(API_USER_VIRTUAL_HOST_PERMISSIONS %
(
virtual_host,
username
),
payload=permission_payload)
def delete_permission(self, username, virtual_host):
"""Delete User permissions for the configured virtual host.
:param str username: Username
:param str virtual_host: Virtual host name
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: dict
"""
virtual_host = quote(virtual_host, '')
return self.http_client.delete(
API_USER_VIRTUAL_HOST_PERMISSIONS %
(
virtual_host,
username
)) | AMQPStorm | /AMQPStorm-2.10.6.tar.gz/AMQPStorm-2.10.6/amqpstorm/management/user.py | user.py |
from amqpstorm.compatibility import json
from amqpstorm.compatibility import quote
from amqpstorm.management.base import ManagementHandler
API_QUEUE = 'queues/%s/%s'
API_QUEUE_BIND = 'bindings/%s/e/%s/q/%s'
API_QUEUE_BINDINGS = 'queues/%s/%s/bindings'
API_QUEUE_PURGE = 'queues/%s/%s/contents'
API_QUEUE_UNBIND = 'bindings/%s/e/%s/q/%s/%s'
API_QUEUES = 'queues'
API_QUEUES_VIRTUAL_HOST = 'queues/%s'
class Queue(ManagementHandler):
def get(self, queue, virtual_host='/'):
"""Get Queue details.
:param queue: Queue name
:param str virtual_host: Virtual host name
:raises ApiError: Raises if the remote server encountered an error.
We also raise an exception if the queue cannot
be found.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: dict
"""
virtual_host = quote(virtual_host, '')
return self.http_client.get(
API_QUEUE % (
virtual_host,
queue
)
)
def list(self, virtual_host='/', show_all=False,
name=None, page_size=100, use_regex=False):
"""List Queues.
:param str virtual_host: Virtual host name
:param bool show_all: List Queues across all virtual hosts
        :param str,None name: Filter by name
        :param bool use_regex: Treat the name filter as a regular expression
        :param int page_size: Number of elements per page
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: list
"""
if show_all:
return self.http_client.list(
API_QUEUES,
name=name, use_regex=use_regex, page_size=page_size,
)
virtual_host = quote(virtual_host, '')
return self.http_client.list(
API_QUEUES_VIRTUAL_HOST % virtual_host,
name=name, use_regex=use_regex, page_size=page_size,
)
def declare(self, queue='', virtual_host='/', passive=False, durable=False,
auto_delete=False, arguments=None):
"""Declare a Queue.
:param str queue: Queue name
:param str virtual_host: Virtual host name
:param bool passive: Do not create
:param bool durable: Durable queue
:param bool auto_delete: Automatically delete when not in use
:param dict,None arguments: Queue key/value arguments
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: dict
"""
if passive:
return self.get(queue, virtual_host=virtual_host)
queue_payload = json.dumps(
{
'durable': durable,
'auto_delete': auto_delete,
'arguments': arguments or {},
'vhost': virtual_host
}
)
return self.http_client.put(
API_QUEUE % (
quote(virtual_host, ''),
queue
),
payload=queue_payload)
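    # Illustrative usage sketch: declaring and purging a queue through
    # the management api, assuming 'client' is a ManagementApi instance.
    #
    #   client.queue.declare('my_queue', durable=True)
    #   client.queue.purge('my_queue')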
def delete(self, queue, virtual_host='/'):
"""Delete a Queue.
:param str queue: Queue name
:param str virtual_host: Virtual host name
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: dict
"""
virtual_host = quote(virtual_host, '')
return self.http_client.delete(API_QUEUE %
(
virtual_host,
queue
))
def purge(self, queue, virtual_host='/'):
"""Purge a Queue.
:param str queue: Queue name
:param str virtual_host: Virtual host name
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: None
"""
virtual_host = quote(virtual_host, '')
return self.http_client.delete(API_QUEUE_PURGE %
(
virtual_host,
queue
))
def bindings(self, queue, virtual_host='/'):
"""Get Queue bindings.
:param str queue: Queue name
:param str virtual_host: Virtual host name
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: list
"""
virtual_host = quote(virtual_host, '')
return self.http_client.get(API_QUEUE_BINDINGS %
(
virtual_host,
queue
))
def bind(self, queue='', exchange='', routing_key='', virtual_host='/',
arguments=None):
"""Bind a Queue.
:param str queue: Queue name
:param str exchange: Exchange name
:param str routing_key: The routing key to use
:param str virtual_host: Virtual host name
:param dict,None arguments: Bind key/value arguments
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: None
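e.g. (a minimal sketch, assuming an initialised ManagementApi client)
::
client.queue.bind('my_queue', 'my_exchange', routing_key='my_key')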
"""
bind_payload = json.dumps({
'destination': queue,
'destination_type': 'q',
'routing_key': routing_key,
'source': exchange,
'arguments': arguments or {},
'vhost': virtual_host
})
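# Only the URL path needs percent-quoting; the JSON payload keeps the
# raw virtual host name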
virtual_host = quote(virtual_host, '')
return self.http_client.post(API_QUEUE_BIND %
(
virtual_host,
exchange,
queue
),
payload=bind_payload)
def unbind(self, queue='', exchange='', routing_key='', virtual_host='/',
properties_key=None):
"""Unbind a Queue.
:param str queue: Queue name
:param str exchange: Exchange name
:param str routing_key: The routing key to use
:param str virtual_host: Virtual host name
:param str properties_key: Binding properties key; defaults to the routing key when not supplied
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: None
"""
unbind_payload = json.dumps({
'destination': queue,
'destination_type': 'q',
'properties_key': properties_key or routing_key,
'source': exchange,
'vhost': virtual_host
})
virtual_host = quote(virtual_host, '')
return self.http_client.delete(API_QUEUE_UNBIND %
(
virtual_host,
exchange,
queue,
properties_key or routing_key
),
payload=unbind_payload) | AMQPStorm | /AMQPStorm-2.10.6.tar.gz/AMQPStorm-2.10.6/amqpstorm/management/queue.py | queue.py |
from amqpstorm.compatibility import quote
from amqpstorm.management.basic import Basic
from amqpstorm.management.channel import Channel
from amqpstorm.management.connection import Connection
from amqpstorm.management.exchange import Exchange
from amqpstorm.management.healthchecks import HealthChecks
from amqpstorm.management.http_client import HTTPClient
from amqpstorm.management.queue import Queue
from amqpstorm.management.user import User
from amqpstorm.management.virtual_host import VirtualHost
API_ALIVENESS_TEST = 'aliveness-test/%s'
API_CLUSTER_NAME = 'cluster-name'
API_NODE = 'nodes/%s'
API_NODES = 'nodes'
API_OVERVIEW = 'overview'
API_TOP = 'top/%s'
API_WHOAMI = 'whoami'
class ManagementApi(object):
"""RabbitMQ Management Api
e.g.
::
from amqpstorm.management import ManagementApi
client = ManagementApi('https://localhost:15671', 'guest', 'guest', verify=True)
client.user.create('my_user', 'password', tags='administrator')
client.user.set_permission(
'my_user',
virtual_host='/',
configure_regex='.*',
write_regex='.*',
read_regex='.*'
)
:param str api_url: RabbitMQ Management url (e.g. https://rmq.amqpstorm.io:15671)
:param str username: Username (e.g. guest)
:param str password: Password (e.g. guest)
:param int,float timeout: TCP Timeout
:param None,str,bool verify: Requests session verify (e.g. True, False or path to CA bundle)
:param None,str,tuple cert: Requests session cert
"""
def __init__(self, api_url, username, password, timeout=10,
verify=None, cert=None):
self.http_client = HTTPClient(
api_url, username, password,
timeout=timeout, verify=verify, cert=cert
)
self._basic = Basic(self.http_client)
self._channel = Channel(self.http_client)
self._connection = Connection(self.http_client)
self._exchange = Exchange(self.http_client)
self._healthchecks = HealthChecks(self.http_client)
self._queue = Queue(self.http_client)
self._user = User(self.http_client)
self._virtual_host = VirtualHost(self.http_client)
def __enter__(self):
return self
def __exit__(self, *_):
pass
def __del__(self):
self.http_client.session.close()
@property
def basic(self):
"""RabbitMQ Basic Operations.
e.g.
::
client.basic.publish('Hello RabbitMQ', routing_key='my_queue')
:rtype: amqpstorm.management.basic.Basic
"""
return self._basic
@property
def channel(self):
"""RabbitMQ Channel Operations.
e.g.
::
client.channel.list()
:rtype: amqpstorm.management.channel.Channel
"""
return self._channel
@property
def connection(self):
"""RabbitMQ Connection Operations.
e.g.
::
client.connection.list()
:rtype: amqpstorm.management.connection.Connection
"""
return self._connection
@property
def exchange(self):
"""RabbitMQ Exchange Operations.
e.g.
::
client.exchange.declare('my_exchange')
:rtype: amqpstorm.management.exchange.Exchange
"""
return self._exchange
@property
def healthchecks(self):
"""RabbitMQ Healthchecks.
e.g.
::
client.healthchecks.get()
:rtype: amqpstorm.management.healthchecks.HealthChecks
"""
return self._healthchecks
@property
def queue(self):
"""RabbitMQ Queue Operations.
e.g.
::
client.queue.declare('my_queue', virtual_host='/')
:rtype: amqpstorm.management.queue.Queue
"""
return self._queue
@property
def user(self):
"""RabbitMQ User Operations.
e.g.
::
client.user.create('my_user', 'password')
:rtype: amqpstorm.management.user.User
"""
return self._user
@property
def virtual_host(self):
"""RabbitMQ VirtualHost Operations.
:rtype: amqpstorm.management.virtual_host.VirtualHost
"""
return self._virtual_host
def aliveness_test(self, virtual_host='/'):
"""Aliveness Test.
e.g.
::
from amqpstorm.management import ManagementApi
client = ManagementApi('http://localhost:15672', 'guest', 'guest')
result = client.aliveness_test('/')
if result['status'] == 'ok':
print("RabbitMQ is alive!")
else:
print("RabbitMQ is not alive! :(")
:param str virtual_host: Virtual host name
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: dict
"""
virtual_host = quote(virtual_host, '')
return self.http_client.get(API_ALIVENESS_TEST %
virtual_host)
def cluster_name(self):
"""Get Cluster Name.
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: dict
"""
return self.http_client.get(API_CLUSTER_NAME)
def node(self, name):
"""Get Nodes.
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: dict
"""
return self.http_client.get(API_NODE % name)
def nodes(self):
"""Get Nodes.
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: dict
"""
return self.http_client.get(API_NODES)
def overview(self):
"""Get Overview.
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: dict
"""
return self.http_client.get(API_OVERVIEW)
def top(self):
"""Top Processes.
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: list
"""
nodes = []
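# Query the `top` endpoint once per cluster node and collect the responses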
for node in self.nodes():
nodes.append(self.http_client.get(API_TOP % node['name']))
return nodes
def whoami(self):
"""Who am I?
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: dict
"""
return self.http_client.get(API_WHOAMI) | AMQPStorm | /AMQPStorm-2.10.6.tar.gz/AMQPStorm-2.10.6/amqpstorm/management/api.py | api.py |
import requests.api
from requests.auth import HTTPBasicAuth
from amqpstorm.compatibility import urlparse
from amqpstorm.management.exception import ApiConnectionError
from amqpstorm.management.exception import ApiError
class HTTPClient(object):
def __init__(self, api_url, username, password, verify, cert, timeout):
self.session = requests.Session()
self.session.verify = verify
self.session.cert = cert
self._auth = HTTPBasicAuth(username, password)
self._base_url = api_url
self._timeout = timeout
def get(self, path, payload=None, headers=None):
"""HTTP GET operation.
:param path: URI Path
:param payload: HTTP Body
:param headers: HTTP Headers
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:return: Response
"""
return self._request('get', path, payload, headers)
def list(self, path, name=None, page_size=None, use_regex=False):
"""List operation (e.g. queue list).
:param path: URI Path
:param name: Filter by name, for example queue name, exchange name etc
:param use_regex: Enables regular expression for the param name
:param page_size: Number of elements per page
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:return: Response
"""
params = dict()
if name is not None:
params['name'] = name
if use_regex:
if isinstance(use_regex, bool):
use_regex = str(use_regex)
params['use_regex'] = use_regex.lower()
if page_size is None:
return self._request('get', path, params=params)
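# Paginated mode: fetch the first page, then walk the remaining pages
# and accumulate their items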
results = list()
current_page = 1
params['page'] = current_page
params['page_size'] = page_size
params['pagination'] = True
first_result = self._request('get', path, params=params)
num_pages = first_result['page_count']
current_page = first_result.get('page', 1)
results.extend(first_result['items'])
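# Keep requesting pages until the reported page count is reached,
# stopping early if the server returns an empty page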
while num_pages > current_page:
params['page'] = current_page + 1
next_result = self._request('get', path, params=params)
current_page = next_result['page']
num_pages = next_result['page_count']
items = next_result.get('items')
if not items:
break
results.extend(items)
return results
def post(self, path, payload=None, headers=None):
"""HTTP POST operation.
:param path: URI Path
:param payload: HTTP Body
:param headers: HTTP Headers
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:return: Response
"""
return self._request('post', path, payload, headers)
def delete(self, path, payload=None, headers=None):
"""HTTP DELETE operation.
:param path: URI Path
:param payload: HTTP Body
:param headers: HTTP Headers
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:return: Response
"""
return self._request('delete', path, payload, headers)
def put(self, path, payload=None, headers=None):
"""HTTP PUT operation.
:param path: URI Path
:param payload: HTTP Body
:param headers: HTTP Headers
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:return: Response
"""
return self._request('put', path, payload, headers)
def _request(self, method, path, payload=None, headers=None, params=None):
"""HTTP operation.
:param method: Operation type (e.g. post)
:param path: URI Path
:param payload: HTTP Body
:param headers: HTTP Headers
:param params: HTTP Parameters
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:return: Response
"""
url = urlparse.urljoin(self._base_url, 'api/%s' % path)
headers = headers or {}
headers['content-type'] = 'application/json'
try:
response = self.session.request(
method, url,
auth=self._auth,
data=payload,
headers=headers,
timeout=self._timeout,
params=params,
)
except requests.RequestException as why:
raise ApiConnectionError(str(why))
json_response = self._get_json_output(response)
self._check_for_errors(response, json_response)
return json_response
@staticmethod
def _get_json_output(response):
"""Get JSON output from the HTTP response.
:param requests.Response response:
:return: Json payload
"""
try:
content = response.json()
except ValueError:
content = None
return content
@staticmethod
def _check_for_errors(response, json_response):
"""Check payload for errors.
:param response: HTTP response
:param json_response: Json response
:raises ApiError: Raises if the remote server encountered an error.
:return:
"""
status_code = response.status_code
try:
response.raise_for_status()
except requests.HTTPError as why:
raise ApiError(str(why), reply_code=status_code)
if isinstance(json_response, dict) and 'error' in json_response:
raise ApiError(json_response['error'], reply_code=status_code) | AMQPStorm | /AMQPStorm-2.10.6.tar.gz/AMQPStorm-2.10.6/amqpstorm/management/http_client.py | http_client.py |
# AMR_Summary
AMR Summary combines the outputs from [ResFinder](https://cge.cbs.dtu.dk/services/ResFinder/) and [MOB-recon](https://github.com/phac-nml/mob-suite) to yield reports with genes AMR resistance phenotypes, and whether they are present on plasmids.
## Installation
AMR_Summary can be installed using conda
Skip this step if you have already installed conda
```
wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh;
bash miniconda.sh -b -p $HOME/miniconda
export PATH="$HOME/miniconda/bin:$PATH"
conda update -q conda
```
### Quickstart
You can now install the AMR_Summary package:
`conda install -c olc-bioinformatics amr_summary`
If you encounter the following error:
`PackageNotFoundError: Packages missing in current channels:`
You need to add one or more of the following channels to your conda install:
- conda-forge
- bioconda
- olcbioinformatics
To see which channels you currently have:
```
conda config --show channels
```
To install the missing channel(s)
```
conda config --append channels olcbioinformatics
conda config --append channels conda-forge
conda config --append channels bioconda
```
### Tests
If you encounter issues with the AMR_Summary package, tests are available to ensure that the installation was successful and your credentials are valid.
You will need to clone this repository and run the tests with pytest:
`git clone https://github.com/OLC-Bioinformatics/AMR_Summary.git`
`cd AMR_Summary`
`python -m pytest tests/ --cov=amr_summary -s -vvv`
## Running AMR_Summary
### Arguments
You can supply absolute, tilde slash (~/), or relative paths for all path arguments
### Required Arguments
- sequence path: name and path of folder containing sequences to process
- database path: name and path of folder containing ResFinder and MOB-recon databases. Note that you do not need to download these databases. They will be downloaded and initialised as part of the script
### Optional Arguments
- report path: name and path of folder in which reports are to be created. Default is sequence_path/reports
- debug: print debug-level logs to console
- version: print version of AMR_Summary
### Example command
To process sequence files in FASTA format in the folder `~/Analyses/sequences`, use databases in `/databases`, and place reports in your current working directory
`AMR_Summary -s ~/Analyses/sequences -d /databases -r .`
### Usage
```
usage: AMR_Summary [-h] -s -d [-r] [--debug] [-v]
AMR Summary: a pipeline to identify AMR resistance genes located on plasmids
by combining ResFinder and MOB-recon
optional arguments:
-h, --help show this help message and exit
-s , --sequence_path
Path of folder containing sequence files in FASTA
format to process
-d , --database_path
Path of folder containing ResFinder and MOB-recon
databases. If these databases cannot be located, they
will be downloaded
-r , --report_path Path of folder in which reports are to be created.
Default is sequence_path/reports
--debug Enable debug-level messages to be printed to the
console
-v, --version show program's version number and exit
``` | AMR-Summary | /AMR_Summary-0.0.1.tar.gz/AMR_Summary-0.0.1/README.md | README.md |
from amr_summary.version import __version__
from olctools.accessoryFunctions.accessoryFunctions import make_path, SetupLogging
from argparse import ArgumentParser
from subprocess import call
from glob import glob
import logging
import sys
import os
def assert_path(path_name, category):
"""
Clean up user-supplied path to allow user expansion (~). Ensure that the path exists.
:param path_name: type str: Name and path of user-supplied path
:param category: type str: Category of supplied path e.g. 'database' or 'sequence'
:return clean_path: type str: Name and path of the (optionally) expanded path
"""
# Expand a leading ~ to the user's home directory, then absolutise
clean_path = os.path.abspath(os.path.expanduser(path_name))
try:
assert os.path.isdir(clean_path)
except AssertionError:
logging.error(f'Cannot locate supplied {category} path: {path_name}. '
f'Please ensure that you supplied the correct path.')
raise SystemExit
return clean_path
class AMRSummary(object):
def main(self):
# Ensure that the necessary databases are present in the supplied database path
self.resfinder_path, self.mob_recon_path = self.assert_databases(database_path=self.database_path)
# Perform ResFinder analyses
self.run_resfinder(
sequence_path=self.sequence_path,
database_path=self.resfinder_path,
report_path=self.report_path
)
# Perform MOB-recon analyses
self.run_mob_recon(
sequence_path=self.sequence_path,
database_path=self.database_path,
report_path=self.report_path
)
@staticmethod
def assert_databases(database_path):
"""
Ensures that the necessary databases are present in the provided database path. If not, the appropriate
database will be installed
:param database_path: type str: Name and path of folder in which the ResFinder and MOB-recon databases
are stored
:return resfinder_path: type str: Name and path of folder in which the ResFinder database is stored
:return mob_recon_path: type str: Name and path of folder in which the MOB-recon database is stored
"""
# ResFinder
resfinder_path = os.path.join(database_path, 'resfinder')
if not os.path.isdir(resfinder_path):
logging.warning(f'ResFinder database could not be located in database path: {database_path}. '
f'Installing it now.')
call(f'python -m olctools.databasesetup.database_setup -d {resfinder_path} -res', shell=True)
# MOB-recon
mob_recon_path = os.path.join(database_path, 'mob_recon')
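# Unlike ResFinder, no download is triggered here; MOB-recon is expected
# to initialise its own databases on first use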
return resfinder_path, mob_recon_path
@staticmethod
def run_resfinder(sequence_path, database_path, report_path):
"""
Use GeneSeekr to run ResFinder analyses on the sequence files
:param sequence_path: type str: Name and path of folder in which the sequence files in FASTA format are located
:param database_path: type str: Name and path of folder in which the ResFinder database is stored
:param report_path: type str: Name and path of folder in which the reports are to be created
"""
logging.info('Running ResFinder analyses')
# Run the ResFinder method of GeneSeekr
res_command = f'GeneSeekr blastn -s {sequence_path} -t {database_path} -r {report_path} -A'
logging.debug(f'ResFinder command: {res_command}')
# Run the system call
call(res_command, shell=True)
# Clean up the outputs
for resfinder_report in glob(os.path.join(report_path, '*.tsv')):
os.remove(resfinder_report)
@staticmethod
def run_mob_recon(sequence_path, database_path, report_path):
"""
Run MOB-recon on the assemblies, and create a summary report linking the AMR resistance genes identified by
ResFinder to the plasmids identified by MOB-recon
:param sequence_path: type str: Name and path of folder in which the sequence files in FASTA format are located
:param database_path: type str: Name and path of folder in which the MOB-recon database is stored
:param report_path: type str: Name and path of folder in which the reports are to be created
"""
logging.info('Running MOB-recon analyses')
# Run MOB-recon
mob_command = f'python -m genemethods.assemblypipeline.mobrecon -s {sequence_path} -r {database_path} ' \
f'-o {report_path} -p'
logging.debug(f'MOB-recon AMR Summary command: {mob_command}')
# Run the system call
call(mob_command, shell=True)
def __init__(self, sequence_path, database_path, report_path):
logging.info(f'Welcome to the CFIA AMR Summary pipeline, version {__version__}!')
# Initialise the sequence path
self.sequence_path = assert_path(
path_name=sequence_path,
category='sequence'
)
# Initialise the database path
self.database_path = assert_path(
path_name=database_path,
category='database'
)
# The report path is optional; if it wasn't supplied, default to sequence_path/reports
if report_path:
# Expand a leading ~ to the user's home directory, then absolutise
self.report_path = os.path.abspath(os.path.expanduser(report_path))
else:
self.report_path = os.path.join(self.sequence_path, 'reports')
try:
make_path(inpath=self.report_path)
except PermissionError:
logging.error(f'Could not create the requested report path: {self.report_path}. '
f'Please ensure that you entered a valid path, and that you have sufficient permissions '
f'to write to that folder')
raise SystemExit
self.resfinder_path = str()
self.mob_recon_path = str()
logging.debug(f'Sequence path: {self.sequence_path}')
logging.debug(f'Database path: {self.database_path}')
logging.debug(f'Report path: {self.report_path}')
def cli():
# Parser for arguments
parser = ArgumentParser(
description='AMR Summary: a pipeline to identify AMR resistance genes located on plasmids by '
'combining ResFinder and MOB-recon')
parser.add_argument(
'-s', '--sequence_path',
metavar=str(),
required=True,
help='Path of folder containing sequence files in FASTA format to process')
parser.add_argument(
'-d', '--database_path',
metavar=str(),
required=True,
help='Path of folder containing ResFinder and MOB-recon databases. If these databases cannot be '
'located, they will be downloaded')
parser.add_argument(
'-r', '--report_path',
metavar=str(),
help='Path of folder in which reports are to be created. Default is sequence_path/reports'
)
parser.add_argument(
'--debug',
action='store_true',
help='Enable debug-level messages to be printed to the console'
)
parser.add_argument(
'-v', '--version',
action='version', version=f'%(prog)s version {__version__}')
arguments = parser.parse_args()
# Set up the logging
SetupLogging(debug=arguments.debug)
amr_summary = AMRSummary(sequence_path=arguments.sequence_path,
database_path=arguments.database_path,
report_path=arguments.report_path)
amr_summary.main()
logging.info('AMR Summary complete!')
# Prevent the arguments being printed to the console (they are returned in order for the tests to work)
sys.stderr = open(os.devnull, 'w')
return arguments
if __name__ == '__main__':
cli() | AMR-Summary | /AMR_Summary-0.0.1.tar.gz/AMR_Summary-0.0.1/amr_summary/amr_summary.py | amr_summary.py |
<h1 align="center">
<br>
<a href="https://github.com/aditya-nugraha-bot/AN-DiscordBot"><img src="https://imgur.com/pY1WUFX.png" alt="AN - Discord Bot"></a>
<br>
AN Discord Bot
<br>
</h1>
<h4 align="center">Music, Moderation, Trivia, Stream Alerts and Fully Modular.</h4>
<p align="center">
<a href="https://discord.gg/WEMJKXY">
<img src="https://discordapp.com/api/guilds/133049272517001216/widget.png?style=shield" alt="Discord Server">
</a>
<a href="https://www.patreon.com/Red_Devs">
<img src="https://img.shields.io/badge/Support-Red!-yellow.svg" alt="Support Red on Patreon!">
</a>
<a href="https://www.python.org/downloads/">
<img src="https://img.shields.io/badge/Made%20With-Python%203.7-blue.svg?style=for-the-badge" alt="Made with Python 3.7">
</a>
<a href="https://crowdin.com/project/red-discordbot">
<img src="https://d322cqt584bo4o.cloudfront.net/red-discordbot/localized.svg" alt="Localized with Crowdin">
</a>
</p>
<p align="center">
<a href="https://github.com/ambv/black">
<img src="https://img.shields.io/badge/code%20style-black-000000.svg" alt="Code Style: Black">
</a>
<a href="http://makeapullrequest.com">
<img src="https://img.shields.io/badge/PRs-welcome-brightgreen.svg">
</a>
</p>
<p align="center">
<a href="#overview">Overview</a>
•
<a href="#installation">Installation</a>
•
<a href="http://red-discordbot.readthedocs.io/en/v3-develop/index.html">Documentation</a>
•
<a href="#plugins">Plugins</a>
•
<a href="#join-the-community">Community</a>
•
<a href="#license">License</a>
</p>
# Overview
AN is a fully modular bot – meaning all features and commands can be enabled/disabled to your
liking, making it completely customizable. This is also a *self-hosted bot* – meaning you will need
to host and maintain your own instance. You can turn AN into an admin bot, music bot, trivia bot,
new best friend or all of these together!
[Installation](#installation) is easy, and you do **NOT** need to know anything about coding! Aside
from installation and updating, every part of the bot can be controlled from within Discord.
**The default set of modules includes and is not limited to:**
- Customisable command permissions
**Additionally, other [plugins](#plugins) (cogs) can be easily found and added from our growing
community of cog repositories.**
# Installation
**The following platforms are officially supported:**
- Later
# Plugins
AN is fully modular, allowing you to load and unload plugins of your choice, and install 3rd party
plugins directly from Discord! A few examples are:
- Cleverbot integration (talk to Red and she talks back)
- Ban sync
- Welcome messages
- Casino
- Reaction roles
- Slow Mode
- Anilist
- And much, much more!
Feel free to take a [peek](https://cogboard.red/t/approved-repositories/210) at a list of
available 3rd party cogs!
# Join the community!
**AN** is in continuous development, and it’s supported by an active community which produces new
content (cogs/plugins) for everyone to enjoy. New features are constantly added.
Join us on our [Official Discord Server](https://discord.gg/WEMJKXY)!
# License
Released under the [GNU GPL v3](https://www.gnu.org/licenses/gpl-3.0.en.html) license.
Red is named after the main character of "Transistor", a video game by
[Super Giant Games](https://www.supergiantgames.com/games/transistor/).
Artwork created by [Sinlaire](https://sinlaire.deviantart.com/) on Deviant Art for the Red Discord
Bot Project.
| AN-DiscordBot | /AN-DiscordBot-3.9.4.tar.gz/AN-DiscordBot-3.9.4/README.md | README.md |
import logging.handlers
import pathlib
import re
import sys
from typing import List, Tuple, Optional
MAX_OLD_LOGS = 8
class RotatingFileHandler(logging.handlers.RotatingFileHandler):
"""Custom rotating file handler.
This file handler rotates a bit differently to the one in stdlib.
For a start, this works off of a "stem" and a "directory". The stem
is the base name of the log file, without the extension. The
directory is where all log files (including backups) will be placed.
Secondly, this logger rotates files downwards, and new logs are
*started* with the backup number incremented. The stdlib handler
rotates files upwards, and this leaves the logs in reverse order.
Thirdly, naming conventions are not customisable with this class.
Logs will initially be named in the format "{stem}.log", and after
rotating, the first log file will be renamed "{stem}-part1.log",
and a new file "{stem}-part2.log" will be created for logging to
continue.
A few things can't be modified in this handler: it must use append
mode, it doesn't support use of the `delay` arg, and it will ignore
custom namers and rotators.
When this handler is instantiated, it will search through the
directory for logs from previous runtimes, and will open the file
with the highest backup number to append to.
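A minimal usage sketch (stem, directory and sizes are illustrative)::
handler = RotatingFileHandler(
stem="latest", directory=pathlib.Path("logs"),
maxBytes=1_000_000, backupCount=8, encoding="utf-8",
)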
"""
def __init__(
self,
stem: str,
directory: pathlib.Path,
maxBytes: int = 0,
backupCount: int = 0,
encoding: Optional[str] = None,
) -> None:
self.baseStem = stem
self.directory = directory.resolve()
# Scan for existing files in directory, append to last part of existing log
log_part_re = re.compile(rf"{stem}-part(?P<partnum>\d+).log")
highest_part = 0
for path in directory.iterdir():
match = log_part_re.match(path.name)
if match and int(match["partnum"]) > highest_part:
highest_part = int(match["partnum"])
if highest_part:
filename = directory / f"{stem}-part{highest_part}.log"
else:
filename = directory / f"{stem}.log"
super().__init__(
filename,
mode="a",
maxBytes=maxBytes,
backupCount=backupCount,
encoding=encoding,
delay=False,
)
def doRollover(self):
if self.stream:
self.stream.close()
self.stream = None
initial_path = self.directory / f"{self.baseStem}.log"
if self.backupCount > 0 and initial_path.exists():
initial_path.replace(self.directory / f"{self.baseStem}-part1.log")
match = re.match(
rf"{self.baseStem}(?:-part(?P<part>\d+)?)?.log", pathlib.Path(self.baseFilename).name
)
latest_part_num = int(match.groupdict(default="1").get("part", "1"))
if self.backupCount < 1:
# No backups, just delete the existing log and start again
pathlib.Path(self.baseFilename).unlink()
elif latest_part_num > self.backupCount:
# Rotate files down one
# an-part2.log becomes an-part1.log etc, a new log is added at the end.
for i in range(1, self.backupCount):
next_log = self.directory / f"{self.baseStem}-part{i + 1}.log"
if next_log.exists():
prev_log = self.directory / f"{self.baseStem}-part{i}.log"
next_log.replace(prev_log)
else:
# Simply start a new file
self.baseFilename = str(
self.directory / f"{self.baseStem}-part{latest_part_num + 1}.log"
)
self.stream = self._open()
def init_logging(level: int, location: pathlib.Path) -> None:
dpy_logger = logging.getLogger("discord")
dpy_logger.setLevel(logging.WARNING)
base_logger = logging.getLogger("an")
base_logger.setLevel(level)
formatter = logging.Formatter(
"[{asctime}] [{levelname}] {name}: {message}", datefmt="%Y-%m-%d %H:%M:%S", style="{"
)
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setFormatter(formatter)
base_logger.addHandler(stdout_handler)
dpy_logger.addHandler(stdout_handler)
if not location.exists():
location.mkdir(parents=True, exist_ok=True)
# Rotate latest logs to previous logs
previous_logs: List[pathlib.Path] = []
latest_logs: List[Tuple[pathlib.Path, str]] = []
for path in location.iterdir():
match = re.match(r"latest(?P<part>-part\d+)?\.log", path.name)
if match:
part = match.groupdict(default="")["part"]
latest_logs.append((path, part))
match = re.match(r"previous(?:-part\d+)?.log", path.name)
if match:
previous_logs.append(path)
# Delete all previous.log files
for path in previous_logs:
path.unlink()
# Rename latest.log files to previous.log
for path, part in latest_logs:
path.replace(location / f"previous{part}.log")
latest_fhandler = RotatingFileHandler(
stem="latest",
directory=location,
maxBytes=1_000_000, # About 1MB per logfile
backupCount=MAX_OLD_LOGS,
encoding="utf-8",
)
all_fhandler = RotatingFileHandler(
stem="an",
directory=location,
maxBytes=1_000_000,
backupCount=MAX_OLD_LOGS,
encoding="utf-8",
)
for fhandler in (latest_fhandler, all_fhandler):
fhandler.setFormatter(formatter)
base_logger.addHandler(fhandler) | AN-DiscordBot | /AN-DiscordBot-3.9.4.tar.gz/AN-DiscordBot-3.9.4/anbot/logging.py | logging.py |
# Discord Version check
import sys
import discord
from anbot.core.bot import AN, ExitCodes
from anbot.core.data_manager import create_temp_config, load_basic_configuration, config_file
from anbot.core.json_io import JsonIO
from anbot.core.global_checks import init_global_checks
from anbot.core.events import init_events
from anbot.core.cli import interactive_config, confirm, parse_cli_flags, ask_sentry
from anbot.core.core_commands import Core
from anbot.core import __version__
import asyncio
import logging.handlers
import logging
import os
# Let's not force this dependency, uvloop is much faster on cpython
if sys.implementation.name == "cpython":
try:
import uvloop
except ImportError:
pass
else:
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
if sys.platform == "win32":
asyncio.set_event_loop(asyncio.ProactorEventLoop())
#
# AN - Discord Bot v3
#
# Made by Aditya Nugraha, improved by many
#
def init_loggers(cli_flags):
# d.py stuff
dpy_logger = logging.getLogger("discord")
dpy_logger.setLevel(logging.WARNING)
console = logging.StreamHandler()
console.setLevel(logging.WARNING)
dpy_logger.addHandler(console)
# AN stuff
logger = logging.getLogger("an")
an_format = logging.Formatter(
"%(asctime)s %(levelname)s %(module)s %(funcName)s %(lineno)d: %(message)s",
datefmt="[%d/%m/%Y %H:%M]",
)
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setFormatter(an_format)
if cli_flags.debug:
os.environ["PYTHONASYNCIODEBUG"] = "1"
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
from anbot.core.data_manager import core_data_path
logfile_path = core_data_path() / "an.log"
fhandler = logging.handlers.RotatingFileHandler(
filename=str(logfile_path), encoding="utf-8", mode="a", maxBytes=10 ** 7, backupCount=5
)
fhandler.setFormatter(an_format)
logger.addHandler(fhandler)
logger.addHandler(stdout_handler)
# Sentry stuff
sentry_logger = logging.getLogger("an.sentry")
sentry_logger.setLevel(logging.WARNING)
return logger, sentry_logger
async def _get_prefix_and_token(an, indict):
"""
Again, please blame <@269933075037814786> for this.
Fetches the stored token, prefix and sentry preference into ``indict``.
:param indict: dict populated in-place with 'token', 'prefix' and 'enable_sentry'
:return: None
"""
indict["token"] = await an.db.token()
indict["prefix"] = await an.db.prefix()
indict["enable_sentry"] = await an.db.enable_sentry()
def list_instances():
if not config_file.exists():
print(
"No instances have been configuan! Configure one "
"using `anbot-setup` before trying to run the bot!"
)
sys.exit(1)
else:
data = JsonIO(config_file)._load_json()
text = "Configuan Instances:\n\n"
for instance_name in sorted(data.keys()):
text += "{}\n".format(instance_name)
print(text)
sys.exit(0)
def main():
description = "AN V3"
cli_flags = parse_cli_flags(sys.argv[1:])
if cli_flags.list_instances:
list_instances()
elif cli_flags.version:
print(description)
sys.exit(0)
elif not cli_flags.instance_name and not cli_flags.no_instance:
print("Error: No instance name was provided!")
sys.exit(1)
if cli_flags.no_instance:
print(
"\033[1m"
"Warning: The data will be placed in a temporary folder and removed on next system reboot."
"\033[0m"
)
cli_flags.instance_name = "temporary_an"
create_temp_config()
load_basic_configuration(cli_flags.instance_name)
log, sentry_log = init_loggers(cli_flags)
an = AN(cli_flags=cli_flags, description=description, pm_help=None)
init_global_checks(an)
init_events(an, cli_flags)
an.add_cog(Core(an))
loop = asyncio.get_event_loop()
tmp_data = {}
loop.run_until_complete(_get_prefix_and_token(an, tmp_data))
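# Token precedence: --token CLI flag > RED_TOKEN environment variable > stored config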
token = os.environ.get("RED_TOKEN", tmp_data["token"])
if cli_flags.token:
token = cli_flags.token
prefix = cli_flags.prefix or tmp_data["prefix"]
if not (token and prefix):
if cli_flags.no_prompt is False:
new_token = interactive_config(an, token_set=bool(token), prefix_set=bool(prefix))
if new_token:
token = new_token
else:
log.critical("Token and prefix must be set in order to login.")
sys.exit(1)
loop.run_until_complete(_get_prefix_and_token(an, tmp_data))
if cli_flags.dry_run:
loop.run_until_complete(an.http.close())
sys.exit(0)
if tmp_data["enable_sentry"]:
an.enable_sentry()
try:
loop.run_until_complete(an.start(token, bot=True))
except discord.LoginFailure:
log.critical("This token doesn't seem to be valid.")
db_token = loop.run_until_complete(an.db.token())
if db_token and not cli_flags.no_prompt:
print("\nDo you want to reset the token? (y/n)")
if confirm("> "):
loop.run_until_complete(an.db.token.set(""))
print("Token has been reset.")
except KeyboardInterrupt:
log.info("Keyboard interrupt detected. Quitting...")
loop.run_until_complete(an.logout())
an._shutdown_mode = ExitCodes.SHUTDOWN
except Exception as e:
log.critical("Fatal exception", exc_info=e)
sentry_log.critical("Fatal Exception", exc_info=e)
loop.run_until_complete(an.logout())
finally:
pending = asyncio.Task.all_tasks(loop=an.loop)
gathered = asyncio.gather(*pending, loop=an.loop, return_exceptions=True)
gathered.cancel()
try:
loop.run_until_complete(an.rpc.close())
except AttributeError:
pass
sys.exit(an._shutdown_mode.value)
if __name__ == "__main__":
main() | AN-DiscordBot | /AN-DiscordBot-3.9.4.tar.gz/AN-DiscordBot-3.9.4/anbot/__main__.py | __main__.py |
import re as _re
import sys as _sys
import warnings as _warnings
from math import inf as _inf
from typing import (
Any as _Any,
ClassVar as _ClassVar,
Dict as _Dict,
List as _List,
Optional as _Optional,
Pattern as _Pattern,
Tuple as _Tuple,
Union as _Union,
)
MIN_PYTHON_VERSION = (3, 7, 0)
__all__ = ["MIN_PYTHON_VERSION", "__version__", "version_info", "VersionInfo"]
if _sys.version_info < MIN_PYTHON_VERSION:
print(
f"Python {'.'.join(map(str, MIN_PYTHON_VERSION))} is required to run Red, but you have "
f"{_sys.version}! Please update Python."
)
_sys.exit(1)
class VersionInfo:
ALPHA = "alpha"
BETA = "beta"
RELEASE_CANDIDATE = "release candidate"
FINAL = "final"
_VERSION_STR_PATTERN: _ClassVar[_Pattern[str]] = _re.compile(
r"^"
r"(?P<major>0|[1-9]\d*)\.(?P<minor>0|[1-9]\d*)\.(?P<micro>0|[1-9]\d*)"
r"(?:(?P<releaselevel>a|b|rc)(?P<serial>0|[1-9]\d*))?"
r"(?:\.post(?P<post_release>0|[1-9]\d*))?"
r"(?:\.dev(?P<dev_release>0|[1-9]\d*))?"
r"$",
flags=_re.IGNORECASE,
)
_RELEASE_LEVELS: _ClassVar[_List[str]] = [ALPHA, BETA, RELEASE_CANDIDATE, FINAL]
_SHORT_RELEASE_LEVELS: _ClassVar[_Dict[str, str]] = {
"a": ALPHA,
"b": BETA,
"rc": RELEASE_CANDIDATE,
}
def __init__(
self,
major: int,
minor: int,
micro: int,
releaselevel: str,
serial: _Optional[int] = None,
post_release: _Optional[int] = None,
dev_release: _Optional[int] = None,
) -> None:
self.major: int = major
self.minor: int = minor
self.micro: int = micro
if releaselevel not in self._RELEASE_LEVELS:
raise TypeError(f"'releaselevel' must be one of: {', '.join(self._RELEASE_LEVELS)}")
self.releaselevel: str = releaselevel
self.serial: _Optional[int] = serial
self.post_release: _Optional[int] = post_release
self.dev_release: _Optional[int] = dev_release
@classmethod
def from_str(cls, version_str: str) -> "VersionInfo":
"""Parse a string into a VersionInfo object.
Raises
------
ValueError
If the version info string is invalid.
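Examples
--------
``VersionInfo.from_str("3.0.0rc1.post2.dev3")`` parses to major=3,
minor=0, micro=0, releaselevel="release candidate", serial=1,
post_release=2 and dev_release=3.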
"""
match = cls._VERSION_STR_PATTERN.match(version_str)
if not match:
raise ValueError(f"Invalid version string: {version_str}")
kwargs: _Dict[str, _Union[str, int]] = {}
for key in ("major", "minor", "micro"):
kwargs[key] = int(match[key])
releaselevel = match["releaselevel"]
if releaselevel is not None:
kwargs["releaselevel"] = cls._SHORT_RELEASE_LEVELS[releaselevel]
else:
kwargs["releaselevel"] = cls.FINAL
for key in ("serial", "post_release", "dev_release"):
if match[key] is not None:
kwargs[key] = int(match[key])
return cls(**kwargs)
@classmethod
def from_json(
cls, data: _Union[_Dict[str, _Union[int, str]], _List[_Union[int, str]]]
) -> "VersionInfo":
if isinstance(data, _List):
# For old versions, data was stored as a list:
# [MAJOR, MINOR, MICRO, RELEASELEVEL, SERIAL]
return cls(*data)
else:
return cls(**data)
def to_json(self) -> _Dict[str, _Union[int, str]]:
return {
"major": self.major,
"minor": self.minor,
"micro": self.micro,
"releaselevel": self.releaselevel,
"serial": self.serial,
"post_release": self.post_release,
"dev_release": self.dev_release,
}
def _generate_comparison_tuples(
self, other: "VersionInfo"
) -> _List[
_Tuple[int, int, int, int, _Union[int, float], _Union[int, float], _Union[int, float]]
]:
tups: _List[
_Tuple[int, int, int, int, _Union[int, float], _Union[int, float], _Union[int, float]]
] = []
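# Missing fields are substituted so ordering matches intuition:
# a missing serial/dev number sorts as +inf and a missing post release
# as -inf, giving e.g. 3.0.0.dev1 < 3.0.0 < 3.0.0.post1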
for obj in (self, other):
tups.append(
(
obj.major,
obj.minor,
obj.micro,
obj._RELEASE_LEVELS.index(obj.releaselevel),
obj.serial if obj.serial is not None else _inf,
obj.post_release if obj.post_release is not None else -_inf,
obj.dev_release if obj.dev_release is not None else _inf,
)
)
return tups
def __lt__(self, other: "VersionInfo") -> bool:
tups = self._generate_comparison_tuples(other)
return tups[0] < tups[1]
def __eq__(self, other: "VersionInfo") -> bool:
tups = self._generate_comparison_tuples(other)
return tups[0] == tups[1]
def __le__(self, other: "VersionInfo") -> bool:
tups = self._generate_comparison_tuples(other)
return tups[0] <= tups[1]
def __str__(self) -> str:
ret = f"{self.major}.{self.minor}.{self.micro}"
if self.releaselevel != self.FINAL:
short = next(
k for k, v in self._SHORT_RELEASE_LEVELS.items() if v == self.releaselevel
)
ret += f"{short}{self.serial}"
if self.post_release is not None:
ret += f".post{self.post_release}"
if self.dev_release is not None:
ret += f".dev{self.dev_release}"
return ret
def __repr__(self) -> str:
return (
"VersionInfo(major={major}, minor={minor}, micro={micro}, "
"releaselevel={releaselevel}, serial={serial}, post={post_release}, "
"dev={dev_release})".format(**self.to_json())
)
__version__ = "3.7.5"
version_info = VersionInfo.from_str(__version__)
# Filter fuzzywuzzy slow sequence matcher warning
_warnings.filterwarnings("ignore", module=r"fuzzywuzzy.*") | AN-DiscordBot | /AN-DiscordBot-3.9.4.tar.gz/AN-DiscordBot-3.9.4/anbot/__init__.py | __init__.py |
import getpass
import os
import platform
import subprocess
import sys
import argparse
import asyncio
import aiohttp
import pkg_resources
from anbot.setup import (
basic_setup,
load_existing_config,
remove_instance,
remove_instance_interaction,
create_backup,
)
from anbot.core import __version__, version_info as red_version_info, VersionInfo
from anbot.core.cli import confirm
if sys.platform == "linux":
import distro
INTERACTIVE_MODE = not len(sys.argv) > 1 # CLI flags = non-interactive
INTRO = "==========================\nAN Discord Bot - Launcher\n==========================\n"
IS_WINDOWS = os.name == "nt"
IS_MAC = sys.platform == "darwin"
if IS_WINDOWS:
# Due to issues with ProactorEventLoop prior to 3.6.6 (bpo-26819)
MIN_PYTHON_VERSION = (3, 6, 6)
else:
MIN_PYTHON_VERSION = (3, 6, 2)
PYTHON_OK = sys.version_info >= MIN_PYTHON_VERSION
def is_venv():
"""Return True if the process is in a venv or in a virtualenv."""
# credit to @calebj
return hasattr(sys, "real_prefix") or (
hasattr(sys, "base_prefix") and sys.base_prefix != sys.prefix
)
def parse_cli_args():
parser = argparse.ArgumentParser(
description="AN - Discord Bot's launcher (V3)", allow_abbrev=False
)
instances = load_existing_config()
parser.add_argument(
"instancename",
metavar="instancename",
type=str,
nargs="?",
help="The instance to run",
choices=list(instances.keys()),
)
parser.add_argument("--start", "-s", help="Starts AN", action="store_true")
parser.add_argument(
"--auto-restart", help="Autorestarts AN in case of issues", action="store_true"
)
parser.add_argument("--update", help="Updates AN", action="store_true")
parser.add_argument(
"--update-dev", help="Updates AN from the Github repo", action="store_true"
)
parser.add_argument(
"--voice", help="Installs extra 'voice' when updating", action="store_true"
)
parser.add_argument("--docs", help="Installs extra 'docs' when updating", action="store_true")
parser.add_argument("--test", help="Installs extra 'test' when updating", action="store_true")
parser.add_argument(
"--mongo", help="Installs extra 'mongo' when updating", action="store_true"
)
parser.add_argument(
"--debuginfo",
help="Prints basic debug info that would be useful for support",
action="store_true",
)
return parser.parse_known_args()
def update_red(dev=False, voice=False, mongo=False, docs=False, test=False):
interpreter = sys.executable
print("Updating AN...")
# If the user ran anbot-launcher.exe, updating with pip will fail
# on windows since the file is open and pip will try to overwrite it.
# We have to rename anbot-launcher.exe in this case.
launcher_script = os.path.abspath(sys.argv[0])
old_name = launcher_script + ".exe"
new_name = launcher_script + ".old"
renamed = False
if "anbot-launcher" in launcher_script and IS_WINDOWS:
renamed = True
print("Renaming {} to {}".format(old_name, new_name))
if os.path.exists(new_name):
os.remove(new_name)
os.rename(old_name, new_name)
egg_l = []
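# Collect the requested pip extras; they are appended to the package
# spec below (e.g. "AN-DiscordBot[voice, mongo]")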
if voice:
egg_l.append("voice")
if mongo:
egg_l.append("mongo")
if docs:
egg_l.append("docs")
if test:
egg_l.append("test")
if dev:
package = "git+https://github.com/Cog-Creators/AN-DiscordBot@V3/develop"
if egg_l:
package += "#egg=AN-DiscordBot[{}]".format(", ".join(egg_l))
else:
package = "AN-DiscordBot"
if egg_l:
package += "[{}]".format(", ".join(egg_l))
arguments = [interpreter, "-m", "pip", "install", "-U", package]
if not is_venv():
arguments.append("--user")
code = subprocess.call(arguments)
if code == 0:
print("AN has been updated")
else:
print("Something went wrong while updating!")
# If anbot wasn't updated, we renamed our .exe file and didn't replace it
scripts = os.listdir(os.path.dirname(launcher_script))
if renamed and "anbot-launcher.exe" not in scripts:
print("Renaming {} to {}".format(new_name, old_name))
os.rename(new_name, old_name)
def run_red(selected_instance, autorestart: bool = False, cliflags=None):
interpreter = sys.executable
while True:
print("Starting {}...".format(selected_instance))
cmd_list = [interpreter, "-m", "anbot", selected_instance]
if cliflags:
cmd_list += cliflags
status = subprocess.call(cmd_list)
if (not autorestart) or (autorestart and status != 26):
break
def cli_flag_getter():
print("Would you like to enter any cli flags to pass to anbot? (y/n)")
resp = user_choice()
if resp == "n":
return None
elif resp == "y":
flags = []
print("Ok, we will now walk through choosing cli flags")
print("Would you like to specify an owner? (y/n)")
print(
"Please note that the owner is normally determined automatically from "
"the bot's token, so you should only use that if you want to specify a "
"user other than that one as the owner."
)
choice = user_choice()
if choice == "y":
print("Enter the user id for the owner")
owner_id = user_choice()
flags.append("--owner {}".format(owner_id))
print("Would you like to specify any prefixes? (y/n)")
choice = user_choice()
if choice == "y":
print(
"Enter the prefixes, separated by a space (please note "
"that prefixes containing a space will need to be added with [p]set prefix)"
)
prefixes = user_choice().split()
for p in prefixes:
flags.append("-p {}".format(p))
print("Would you like mentioning the bot to be a prefix? (y/n)")
choice = user_choice()
if choice == "y":
flags.append("--mentionable")
print(
"Would you like to disable console input? Please note that features "
"requiring console interaction may fail to work (y/n)"
)
choice = user_choice()
if choice == "y":
flags.append("--no-prompt")
print("Would you like to start with no cogs loaded? (y/n)")
choice = user_choice()
if choice == "y":
flags.append("--no-cogs")
print("Do you want to do a dry run? (y/n)")
choice = user_choice()
if choice == "y":
flags.append("--dry-run")
print("Do you want to set the log level to debug? (y/n)")
choice = user_choice()
if choice == "y":
flags.append("--debug")
print(
"Do you want the Dev cog loaded (thus enabling commands such as debug and repl)? (y/n)"
)
choice = user_choice()
if choice == "y":
flags.append("--dev")
print("Do you want to enable RPC? (y/n)")
choice = user_choice()
if choice == "y":
flags.append("--rpc")
print("You have selected the following cli flags:\n\n")
print("\n".join(flags))
print("\nIf this looks good to you, type y. If you wish to start over, type n")
choice = user_choice()
if choice == "y":
print("Done selecting cli flags")
return flags
else:
print("Starting over")
return cli_flag_getter()
else:
print("Invalid response! Let's try again")
return cli_flag_getter()
def instance_menu():
instances = load_existing_config()
if not instances:
print("No instances found!")
return None
counter = 0
print("AN instance menu\n")
name_num_map = {}
for name in list(instances.keys()):
print("{}. {}\n".format(counter + 1, name))
name_num_map[str(counter + 1)] = name
counter += 1
while True:
selection = user_choice()
try:
selection = int(selection)
except ValueError:
print("Invalid input! Please enter a number corresponding to an instance.")
else:
if selection not in list(range(1, counter + 1)):
print("Invalid selection! Please try again")
else:
return name_num_map[str(selection)]
async def reset_red():
instances = load_existing_config()
if not instances:
print("No instance to delete.\n")
return
print("WARNING: You are about to remove ALL AN instances on this computer.")
print(
"If you want to reset data of only one instance, "
"please select option 5 in the launcher."
)
await asyncio.sleep(2)
print("\nIf you continue you will remove these instanes.\n")
for instance in list(instances.keys()):
print(" - {}".format(instance))
await asyncio.sleep(3)
print('\nIf you want to reset all instances, type "I agree".')
response = input("> ").strip()
if response != "I agree":
print("Cancelling...")
return
if confirm("\nDo you want to create a backup for an instance? (y/n) "):
for index, instance in instances.items():
print("\nRemoving {}...".format(index))
await create_backup(index, instance)
await remove_instance(index, instance)
else:
for index, instance in instances.items():
await remove_instance(index, instance)
print("All instances have been removed.")
def clear_screen():
if IS_WINDOWS:
os.system("cls")
else:
os.system("clear")
def wait():
if INTERACTIVE_MODE:
input("Press enter to continue.")
def user_choice():
return input("> ").lower().strip()
def extras_selector():
print("Enter any extra requirements you want installed\n")
print("Options are: voice, docs, test, mongo\n")
selected = user_choice()
selected = selected.split()
return selected
def development_choice(can_go_back=True):
while True:
print("\n")
print("Do you want to install stable or development version?")
print("1. Stable version")
print("2. Development version")
if can_go_back:
print("\n")
print("0. Go back")
choice = user_choice()
print("\n")
if choice == "1":
selected = extras_selector()
update_red(
dev=False,
voice=True if "voice" in selected else False,
docs=True if "docs" in selected else False,
test=True if "test" in selected else False,
mongo=True if "mongo" in selected else False,
)
break
elif choice == "2":
selected = extras_selector()
update_red(
dev=True,
voice=True if "voice" in selected else False,
docs=True if "docs" in selected else False,
test=True if "test" in selected else False,
mongo=True if "mongo" in selected else False,
)
break
elif choice == "0" and can_go_back:
return False
clear_screen()
return True
def debug_info():
pyver = sys.version
redver = pkg_resources.get_distribution("AN-DiscordBot").version
if IS_WINDOWS:
os_info = platform.uname()
osver = "{} {} (version {}) {}".format(
os_info.system, os_info.release, os_info.version, os_info.machine
)
elif IS_MAC:
os_info = platform.mac_ver()
osver = "Mac OSX {} {}".format(os_info[0], os_info[2])
else:
os_info = distro.linux_distribution()
osver = "{} {}".format(os_info[0], os_info[1]).strip()
user_who_ran = getpass.getuser()
info = (
"Debug Info for AN\n\n"
+ "Python version: {}\n".format(pyver)
+ "AN version: {}\n".format(redver)
+ "OS version: {}\n".format(osver)
+ "System arch: {}\n".format(platform.machine())
+ "User: {}\n".format(user_who_ran)
)
print(info)
sys.exit(0)
async def is_outdated():
red_pypi = "https://pypi.python.org/pypi/AN-DiscordBot"
async with aiohttp.ClientSession() as session:
async with session.get("{}/json".format(red_pypi)) as r:
data = await r.json()
new_version = data["info"]["version"]
return VersionInfo.from_str(new_version) > red_version_info, new_version
def main_menu():
if IS_WINDOWS:
os.system("TITLE AN - Discord Bot V3 Launcher")
clear_screen()
loop = asyncio.get_event_loop()
outdated, new_version = loop.run_until_complete(is_outdated())
while True:
print(INTRO)
print("\033[4mCurrent version:\033[0m {}".format(__version__))
if outdated:
print("AN is outdated. {} is available.".format(new_version))
print("")
print("1. Run AN w/ autorestart in case of issues")
print("2. Run AN")
print("3. Update AN")
print("4. Create Instance")
print("5. Remove Instance")
print("6. Debug information (use this if having issues with the launcher or bot)")
print("7. Reinstall AN")
print("0. Exit")
choice = user_choice()
if choice == "1":
instance = instance_menu()
if instance:
cli_flags = cli_flag_getter()
run_red(instance, autorestart=True, cliflags=cli_flags)
wait()
elif choice == "2":
instance = instance_menu()
if instance:
cli_flags = cli_flag_getter()
run_red(instance, autorestart=False, cliflags=cli_flags)
wait()
elif choice == "3":
if development_choice():
wait()
elif choice == "4":
basic_setup()
wait()
elif choice == "5":
loop.run_until_complete(remove_instance_interaction())
wait()
elif choice == "6":
debug_info()
elif choice == "7":
while True:
clear_screen()
print("==== Reinstall AN ====")
print(
"1. Reinstall AN requirements (discard code changes, keep data and 3rd party cogs)"
)
print("2. Reset all data")
print("3. Factory reset (discard code changes, reset all data)")
print("\n")
print("0. Back")
choice = user_choice()
if choice == "1":
if development_choice():
wait()
elif choice == "2":
loop.run_until_complete(reset_red())
wait()
elif choice == "3":
loop.run_until_complete(reset_red())
development_choice(can_go_back=False)
wait()
elif choice == "0":
break
elif choice == "0":
break
clear_screen()
def main():
args, flags_to_pass = parse_cli_args()
if not PYTHON_OK:
print(
f"Python {'.'.join(map(str, MIN_PYTHON_VERSION))} is required to run AN, but you "
f"have {sys.version}! Please update Python."
)
sys.exit(1)
if args.debuginfo: # Check first since the function triggers an exit
debug_info()
if args.update and args.update_dev: # Conflicting args, so error out
raise RuntimeError(
"\nUpdate requested but conflicting arguments provided.\n\n"
"Please try again using only one of --update or --update-dev"
)
if args.update:
update_red(voice=args.voice, docs=args.docs, test=args.test, mongo=args.mongo)
elif args.update_dev:
update_red(dev=True, voice=args.voice, docs=args.docs, test=args.test, mongo=args.mongo)
if INTERACTIVE_MODE:
main_menu()
elif args.start:
print("Starting AN...")
run_red(args.instancename, autorestart=args.auto_restart, cliflags=flags_to_pass)
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
print("Exiting...") | AN-DiscordBot | /AN-DiscordBot-3.9.4.tar.gz/AN-DiscordBot-3.9.4/anbot/launcher.py | launcher.py |
import os
from datetime import datetime
from typing import List, Union
import discord
from anbot.core import Config
from anbot.core.bot import AN
from .utils.common_filters import (
filter_invites,
filter_mass_mentions,
filter_urls,
escape_spoilers,
)
__all__ = [
"Case",
"CaseType",
"get_next_case_number",
"get_case",
"get_all_cases",
"create_case",
"get_casetype",
"get_all_casetypes",
"register_casetype",
"register_casetypes",
"get_modlog_channel",
"set_modlog_channel",
"reset_cases",
]
_DEFAULT_GLOBAL = {"casetypes": {}}
_DEFAULT_GUILD = {"mod_log": None, "cases": {}, "casetypes": {}}
def _register_defaults():
_conf.register_global(**_DEFAULT_GLOBAL)
_conf.register_guild(**_DEFAULT_GUILD)
if not os.environ.get("BUILDING_DOCS"):
_conf = Config.get_conf(None, 1354799444, cog_name="ModLog")
_register_defaults()
class Case:
"""A single mod log case"""
def __init__(
self,
bot: AN,
guild: discord.Guild,
created_at: int,
action_type: str,
user: discord.User,
moderator: discord.Member,
case_number: int,
reason: str = None,
until: int = None,
channel: discord.TextChannel = None,
amended_by: discord.Member = None,
modified_at: int = None,
message: discord.Message = None,
):
self.bot = bot
self.guild = guild
self.created_at = created_at
self.action_type = action_type
self.user = user
self.moderator = moderator
self.reason = reason
self.until = until
self.channel = channel
self.amended_by = amended_by
self.modified_at = modified_at
self.case_number = case_number
self.message = message
async def edit(self, data: dict):
"""
Edits a case
Parameters
----------
data: dict
The attributes to change
"""
for item in list(data.keys()):
setattr(self, item, data[item])
await _conf.guild(self.guild).cases.set_raw(str(self.case_number), value=self.to_json())
self.bot.dispatch("modlog_case_edit", self)
async def message_content(self, embed: bool = True):
"""
Format a case message
Parameters
----------
embed: bool
Whether or not to get an embed
Returns
-------
discord.Embed or `str`
A rich embed or string representing a case message
"""
casetype = await get_casetype(self.action_type)
title = "{}".format(
"Case #{} | {} {}".format(self.case_number, casetype.case_str, casetype.image)
)
if self.reason:
reason = "**Reason:** {}".format(self.reason)
else:
reason = "**Reason:** Use `[p]reason {} <reason>` to add it".format(self.case_number)
if self.moderator is not None:
moderator = escape_spoilers(
"{}#{} ({})\n".format(
self.moderator.name, self.moderator.discriminator, self.moderator.id
)
)
else:
moderator = "Unknown"
until = None
duration = None
        if self.until:
            start = datetime.fromtimestamp(self.created_at)
            end = datetime.fromtimestamp(self.until)
            until = end.strftime("%Y-%m-%d %H:%M:%S")
            duration = _strfdelta(end - start)
amended_by = None
if self.amended_by:
amended_by = escape_spoilers(
"{}#{} ({})".format(
self.amended_by.name, self.amended_by.discriminator, self.amended_by.id
)
)
last_modified = None
if self.modified_at:
last_modified = "{}".format(
datetime.fromtimestamp(self.modified_at).strftime("%Y-%m-%d %H:%M:%S")
)
user = escape_spoilers(
filter_invites(
"{}#{} ({})\n".format(self.user.name, self.user.discriminator, self.user.id)
)
) # Invites and spoilers get rendered even in embeds.
if embed:
emb = discord.Embed(title=title, description=reason)
emb.set_author(name=user, icon_url=self.user.avatar_url)
emb.add_field(name="Moderator", value=moderator, inline=False)
if until and duration:
emb.add_field(name="Until", value=until)
emb.add_field(name="Duration", value=duration)
if self.channel:
emb.add_field(name="Channel", value=self.channel.name, inline=False)
if amended_by:
emb.add_field(name="Amended by", value=amended_by)
if last_modified:
emb.add_field(name="Last modified at", value=last_modified)
emb.timestamp = datetime.fromtimestamp(self.created_at)
return emb
else:
user = filter_mass_mentions(filter_urls(user)) # Further sanitization outside embeds
case_text = ""
case_text += "{}\n".format(title)
case_text += "**User:** {}\n".format(user)
case_text += "**Moderator:** {}\n".format(moderator)
case_text += "{}\n".format(reason)
if until and duration:
case_text += "**Until:** {}\n**Duration:** {}\n".format(until, duration)
if self.channel:
case_text += "**Channel**: {}\n".format(self.channel.name)
if amended_by:
case_text += "**Amended by:** {}\n".format(amended_by)
if last_modified:
case_text += "**Last modified at:** {}\n".format(last_modified)
return case_text.strip()
def to_json(self) -> dict:
"""Transform the object to a dict
Returns
-------
dict
The case in the form of a dict
"""
if self.moderator is not None:
mod = self.moderator.id
else:
mod = None
data = {
"case_number": self.case_number,
"action_type": self.action_type,
"guild": self.guild.id,
"created_at": self.created_at,
"user": self.user.id,
"moderator": mod,
"reason": self.reason,
"until": self.until,
"channel": self.channel.id if hasattr(self.channel, "id") else None,
"amended_by": self.amended_by.id if hasattr(self.amended_by, "id") else None,
"modified_at": self.modified_at,
"message": self.message.id if hasattr(self.message, "id") else None,
}
return data
@classmethod
async def from_json(cls, mod_channel: discord.TextChannel, bot: AN, data: dict):
"""Get a Case object from the provided information
Parameters
----------
mod_channel: discord.TextChannel
The mod log channel for the guild
bot: AN
The bot's instance. Needed to get the target user
data: dict
            The JSON representation of the case to retrieve
Returns
-------
Case
The case object for the requested case
"""
guild = mod_channel.guild
message = await mod_channel.get_message(data["message"])
user = await bot.get_user_info(data["user"])
moderator = guild.get_member(data["moderator"])
channel = guild.get_channel(data["channel"])
amended_by = guild.get_member(data["amended_by"])
case_guild = bot.get_guild(data["guild"])
return cls(
bot=bot,
guild=case_guild,
created_at=data["created_at"],
action_type=data["action_type"],
user=user,
moderator=moderator,
case_number=data["case_number"],
reason=data["reason"],
until=data["until"],
channel=channel,
amended_by=amended_by,
modified_at=data["modified_at"],
message=message,
)
class CaseType:
"""
A single case type
Attributes
----------
name: str
The name of the case
default_setting: bool
Whether the case type should be on (if `True`)
or off (if `False`) by default
image: str
The emoji to use for the case type (for example, :boot:)
case_str: str
The string representation of the case (example: Ban)
audit_type: `str`, optional
The action type of the action as it would appear in the
audit log
"""
def __init__(
self,
name: str,
default_setting: bool,
image: str,
case_str: str,
audit_type: str = None,
guild: discord.Guild = None,
):
self.name = name
self.default_setting = default_setting
self.image = image
self.case_str = case_str
self.audit_type = audit_type
self.guild = guild
async def to_json(self):
"""Transforms the case type into a dict and saves it"""
data = {
"default_setting": self.default_setting,
"image": self.image,
"case_str": self.case_str,
"audit_type": self.audit_type,
}
await _conf.casetypes.set_raw(self.name, value=data)
async def is_enabled(self) -> bool:
"""
Determines if the case is enabled.
If the guild is not set, this will always return False
Returns
-------
bool:
True if the guild is set and the casetype is enabled for the guild
False if the guild is not set or if the guild is set and the type
is disabled
"""
if not self.guild:
return False
return await _conf.guild(self.guild).casetypes.get_raw(
self.name, default=self.default_setting
)
async def set_enabled(self, enabled: bool):
"""
Sets the case as enabled or disabled
Parameters
----------
enabled: bool
True if the case should be enabled, otherwise False"""
if not self.guild:
return
await _conf.guild(self.guild).casetypes.set_raw(self.name, value=enabled)
@classmethod
def from_json(cls, data: dict):
"""
Parameters
----------
data: dict
The data to create an instance from
Returns
-------
CaseType
"""
return cls(**data)
async def get_next_case_number(guild: discord.Guild) -> str:
"""
Gets the next case number
Parameters
----------
guild: `discord.Guild`
The guild to get the next case number for
Returns
-------
str
The next case number
"""
cases = sorted((await _conf.guild(guild).get_raw("cases")), key=lambda x: int(x), reverse=True)
return str(int(cases[0]) + 1) if cases else "1"
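# Worked example: with stored case keys {"1": ..., "2": ..., "10": ...},
# sorting by int(key) (rather than lexically) puts "10" first, so "11" is
# returned. With no cases stored, "1" is returned.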
async def get_case(case_number: int, guild: discord.Guild, bot: AN) -> Case:
"""
Gets the case with the associated case number
Parameters
----------
case_number: int
The case number for the case to get
guild: discord.Guild
The guild to get the case from
bot: AN
The bot's instance
Returns
-------
Case
The case associated with the case number
Raises
------
RuntimeError
If there is no case for the specified number
"""
try:
case = await _conf.guild(guild).cases.get_raw(str(case_number))
except KeyError as e:
raise RuntimeError("That case does not exist for guild {}".format(guild.name)) from e
mod_channel = await get_modlog_channel(guild)
return await Case.from_json(mod_channel, bot, case)
async def get_all_cases(guild: discord.Guild, bot: AN) -> List[Case]:
"""
Gets all cases for the specified guild
Parameters
----------
guild: `discord.Guild`
The guild to get the cases from
bot: AN
The bot's instance
Returns
-------
list
A list of all cases for the guild
"""
cases = await _conf.guild(guild).get_raw("cases")
case_numbers = list(cases.keys())
case_list = []
for case in case_numbers:
case_list.append(await get_case(case, guild, bot))
return case_list
async def create_case(
bot: AN,
guild: discord.Guild,
created_at: datetime,
action_type: str,
user: Union[discord.User, discord.Member],
moderator: discord.Member = None,
reason: str = None,
until: datetime = None,
channel: discord.TextChannel = None,
) -> Union[Case, None]:
"""
Creates a new case.
This fires an event :code:`on_modlog_case_create`
Parameters
----------
bot: `AN`
The bot object
guild: `discord.Guild`
The guild the action was taken in
created_at: datetime
The time the action occurred at
action_type: str
The type of action that was taken
user: `discord.User` or `discord.Member`
        The user targeted by the action
moderator: `discord.Member`
The moderator who took the action
reason: str
The reason the action was taken
until: datetime
The time the action is in effect until
channel: `discord.TextChannel` or `discord.VoiceChannel`
The channel the action was taken in
"""
case_type = await get_casetype(action_type, guild)
if case_type is None:
return None
if not await case_type.is_enabled():
return None
if user == bot.user:
return None
next_case_number = int(await get_next_case_number(guild))
case = Case(
bot,
guild,
int(created_at.timestamp()),
action_type,
user,
moderator,
next_case_number,
reason,
int(until.timestamp()) if until else None,
channel,
amended_by=None,
modified_at=None,
message=None,
)
await _conf.guild(guild).cases.set_raw(str(next_case_number), value=case.to_json())
bot.dispatch("modlog_case_create", case)
return case
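# A minimal usage sketch (hypothetical cog code; assumes a "ban" case type is
# registered and enabled for the guild):
#
#   from datetime import datetime
#   from anbot.core import modlog
#
#   case = await modlog.create_case(
#       ctx.bot, ctx.guild, datetime.utcnow(), "ban",
#       user=member, moderator=ctx.author, reason="Spamming",
#   )
#   if case is not None:  # None means the type is disabled or unregistered
#       await ctx.send(await case.message_content(embed=False))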
async def get_casetype(name: str, guild: discord.Guild = None) -> Union[CaseType, None]:
"""
Gets the case type
Parameters
----------
name: str
The name of the case type to get
guild: discord.Guild
If provided, sets the case type's guild attribute to this guild
Returns
-------
CaseType or None
"""
casetypes = await _conf.get_raw("casetypes")
if name in casetypes:
data = casetypes[name]
data["name"] = name
casetype = CaseType.from_json(data)
casetype.guild = guild
return casetype
else:
return None
async def get_all_casetypes(guild: discord.Guild = None) -> List[CaseType]:
"""
Get all currently registered case types
Returns
-------
list
A list of case types
"""
casetypes = await _conf.get_raw("casetypes", default={})
typelist = []
for ct in casetypes.keys():
data = casetypes[ct]
data["name"] = ct
casetype = CaseType.from_json(data)
casetype.guild = guild
typelist.append(casetype)
return typelist
async def register_casetype(
name: str, default_setting: bool, image: str, case_str: str, audit_type: str = None
) -> CaseType:
"""
Registers a case type. If the case type exists and
there are differences between the values passed and
what is stored already, the case type will be updated
with the new values
Parameters
----------
name: str
The name of the case
default_setting: bool
Whether the case type should be on (if `True`)
or off (if `False`) by default
image: str
The emoji to use for the case type (for example, :boot:)
case_str: str
The string representation of the case (example: Ban)
audit_type: `str`, optional
The action type of the action as it would appear in the
audit log
Returns
-------
CaseType
The case type that was registered
Raises
------
    RuntimeError
        If the case type is already registered with identical values
    TypeError
        If a parameter is missing
ValueError
If a parameter's value is not valid
AttributeError
If the audit_type is not an attribute of `discord.AuditLogAction`
"""
if not isinstance(name, str):
raise ValueError("The 'name' is not a string! Check the value!")
if not isinstance(default_setting, bool):
raise ValueError("'default_setting' needs to be a bool!")
if not isinstance(image, str):
raise ValueError("The 'image' is not a string!")
if not isinstance(case_str, str):
raise ValueError("The 'case_str' is not a string!")
if audit_type is not None:
if not isinstance(audit_type, str):
raise ValueError("The 'audit_type' is not a string!")
        # Raises AttributeError if audit_type is not a valid
        # discord.AuditLogAction attribute.
        getattr(discord.AuditLogAction, audit_type)
ct = await get_casetype(name)
if ct is None:
casetype = CaseType(name, default_setting, image, case_str, audit_type)
await casetype.to_json()
return casetype
else:
# Case type exists, so check for differences
# If no differences, raise RuntimeError
changed = False
if ct.default_setting != default_setting:
ct.default_setting = default_setting
changed = True
if ct.image != image:
ct.image = image
changed = True
if ct.case_str != case_str:
ct.case_str = case_str
changed = True
if ct.audit_type != audit_type:
ct.audit_type = audit_type
changed = True
if changed:
await ct.to_json()
return ct
else:
raise RuntimeError("That case type is already registered!")
async def register_casetypes(new_types: List[dict]) -> List[CaseType]:
"""
Registers multiple case types
Parameters
----------
new_types: list
The new types to register
Returns
-------
    List[CaseType]
        A list of the case types that were registered
Raises
------
RuntimeError
KeyError
ValueError
AttributeError
See Also
--------
anbot.core.modlog.register_casetype
"""
    type_list = []
    for new_type in new_types:
        # Any RuntimeError, ValueError, AttributeError or TypeError raised
        # by register_casetype propagates to the caller unchanged.
        ct = await register_casetype(**new_type)
        type_list.append(ct)
    return type_list
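# A minimal registration sketch, typically run once during cog setup (the
# casetype values here are hypothetical; "ban" is a valid
# discord.AuditLogAction attribute):
#
#   await register_casetypes([
#       {
#           "name": "ban",
#           "default_setting": True,
#           "image": ":hammer:",
#           "case_str": "Ban",
#           "audit_type": "ban",
#       },
#   ])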
async def get_modlog_channel(guild: discord.Guild) -> discord.TextChannel:
"""
Get the current modlog channel.
Parameters
----------
guild: `discord.Guild`
The guild to get the modlog channel for.
Returns
-------
`discord.TextChannel`
The channel object representing the modlog channel.
Raises
------
RuntimeError
If the modlog channel is not found.
"""
if hasattr(guild, "get_channel"):
channel = guild.get_channel(await _conf.guild(guild).mod_log())
else:
# For unit tests only
channel = await _conf.guild(guild).mod_log()
if channel is None:
raise RuntimeError("Failed to get the mod log channel!")
return channel
async def set_modlog_channel(
guild: discord.Guild, channel: Union[discord.TextChannel, None]
) -> bool:
"""
Changes the modlog channel
Parameters
----------
guild: `discord.Guild`
The guild to set a mod log channel for
channel: `discord.TextChannel` or `None`
The channel to be set as modlog channel
Returns
-------
bool
`True` if successful
"""
await _conf.guild(guild).mod_log.set(channel.id if hasattr(channel, "id") else None)
return True
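# Usage sketch: point the modlog at an existing text channel, then resolve it
# back. get_modlog_channel raises RuntimeError if no channel is configured:
#
#   await set_modlog_channel(guild, some_text_channel)
#   channel = await get_modlog_channel(guild)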
async def reset_cases(guild: discord.Guild) -> bool:
"""
Wipes all modlog cases for the specified guild
Parameters
----------
guild: `discord.Guild`
The guild to reset cases for
Returns
-------
bool
`True` if successful
"""
await _conf.guild(guild).cases.set({})
return True
def _strfdelta(delta):
s = []
if delta.days:
ds = "%i day" % delta.days
if delta.days > 1:
ds += "s"
s.append(ds)
hrs, rem = divmod(delta.seconds, 60 * 60)
if hrs:
hs = "%i hr" % hrs
if hrs > 1:
hs += "s"
s.append(hs)
mins, secs = divmod(rem, 60)
if mins:
s.append("%i min" % mins)
if secs:
s.append("%i sec" % secs)
return " ".join(s) | AN-DiscordBot | /AN-DiscordBot-3.9.4.tar.gz/AN-DiscordBot-3.9.4/anbot/core/modlog.py | modlog.py |
import warnings
from typing import Awaitable, TYPE_CHECKING, Dict
import discord
from .commands import (
bot_has_permissions,
has_permissions,
is_owner,
guildowner,
guildowner_or_permissions,
admin,
admin_or_permissions,
mod,
mod_or_permissions,
check as _check_decorator,
)
from .utils.mod import (
is_mod_or_superior as _is_mod_or_superior,
is_admin_or_superior as _is_admin_or_superior,
check_permissions as _check_permissions,
)
if TYPE_CHECKING:
from .bot import AN
from .commands import Context
__all__ = [
"bot_has_permissions",
"has_permissions",
"is_owner",
"guildowner",
"guildowner_or_permissions",
"admin",
"admin_or_permissions",
"mod",
"mod_or_permissions",
"is_mod_or_superior",
"is_admin_or_superior",
"bot_in_a_guild",
"check_permissions",
]
def bot_in_a_guild():
"""Deny the command if the bot is not in a guild."""
async def predicate(ctx):
return len(ctx.bot.guilds) > 0
return _check_decorator(predicate)
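# A minimal decorator sketch (hypothetical command) showing how this check
# composes with a command definition:
#
#   @commands.command()
#   @bot_in_a_guild()
#   async def guildcount(ctx):
#       await ctx.send("I am in {} guilds.".format(len(ctx.bot.guilds)))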
def is_mod_or_superior(ctx: "Context") -> Awaitable[bool]:
warnings.warn(
"`anbot.core.checks.is_mod_or_superior` is deprecated and will be removed in a future "
"release, please use `anbot.core.utils.mod.is_mod_or_superior` instead.",
category=DeprecationWarning,
)
return _is_mod_or_superior(ctx.bot, ctx.author)
def is_admin_or_superior(ctx: "Context") -> Awaitable[bool]:
warnings.warn(
"`anbot.core.checks.is_admin_or_superior` is deprecated and will be removed in a future "
"release, please use `anbot.core.utils.mod.is_admin_or_superior` instead.",
category=DeprecationWarning,
)
return _is_admin_or_superior(ctx.bot, ctx.author)
def check_permissions(ctx: "Context", perms: Dict[str, bool]) -> Awaitable[bool]:
    warnings.warn(
        "`anbot.core.checks.check_permissions` is deprecated and will be removed in a future "
        "release, please use `anbot.core.utils.mod.check_permissions` instead.",
        category=DeprecationWarning,
    )
return _check_permissions(ctx, perms) | AN-DiscordBot | /AN-DiscordBot-3.9.4.tar.gz/AN-DiscordBot-3.9.4/anbot/core/checks.py | checks.py |
import asyncio
from typing import Optional
from aiohttp import web
from aiohttp_json_rpc import JsonRpc
from aiohttp_json_rpc.rpc import JsonRpcMethod
import logging
log = logging.getLogger("red.rpc")
__all__ = ["RPC", "RPCMixin", "get_name"]
def get_name(func, prefix=""):
class_name = prefix or func.__self__.__class__.__name__.lower()
func_name = func.__name__.strip("_")
    # Methods defined on the RPC server itself (class ANRpc) get bare
    # upper-case names with no prefix.
    if class_name == "anrpc":
return func_name.upper()
return f"{class_name}__{func_name}".upper()
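# Naming scheme examples: a bound coroutine method `play` on a hypothetical
# cog `Audio` maps to "AUDIO__PLAY", while methods of ANRpc itself
# (e.g. get_method_info) map to the bare upper-case name "GET_METHOD_INFO".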
class ANRpc(JsonRpc):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.add_methods(("", self.get_method_info))
def _add_method(self, method, name="", prefix=""):
if not asyncio.iscoroutinefunction(method):
return
name = name or get_name(method, prefix)
self.methods[name] = JsonRpcMethod(method)
def remove_method(self, method):
meth_name = get_name(method)
new_methods = {}
for name, meth in self.methods.items():
if name != meth_name:
new_methods[name] = meth
self.methods = new_methods
def remove_methods(self, prefix: str):
new_methods = {}
for name, meth in self.methods.items():
splitted = name.split("__")
if len(splitted) < 2 or splitted[0] != prefix:
new_methods[name] = meth
self.methods = new_methods
async def get_method_info(self, request):
method_name = request.params[0]
if method_name in self.methods:
return self.methods[method_name].__doc__
return "No docstring available."
class RPC:
"""
RPC server manager.
"""
def __init__(self):
self.app = web.Application()
self._rpc = ANRpc()
self.app.router.add_route("*", "/", self._rpc.handle_request)
self._runner = web.AppRunner(self.app)
self._site: Optional[web.TCPSite] = None
async def initialize(self):
"""
Finalizes the initialization of the RPC server and allows it to begin
accepting queries.
"""
await self._runner.setup()
self._site = web.TCPSite(self._runner, host="127.0.0.1", port=6133)
await self._site.start()
log.debug("Created RPC server listener.")
async def close(self):
"""
Closes the RPC server.
"""
await self._runner.cleanup()
def add_method(self, method, prefix: str = None):
if prefix is None:
prefix = method.__self__.__class__.__name__.lower()
if not asyncio.iscoroutinefunction(method):
raise TypeError("RPC methods must be coroutines.")
self._rpc.add_methods((prefix, method))
def add_multi_method(self, *methods, prefix: str = None):
if not all(asyncio.iscoroutinefunction(m) for m in methods):
raise TypeError("RPC methods must be coroutines.")
for method in methods:
self.add_method(method, prefix=prefix)
def remove_method(self, method):
self._rpc.remove_method(method)
def remove_methods(self, prefix: str):
self._rpc.remove_methods(prefix)
class RPCMixin:
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.rpc = RPC()
self.rpc_handlers = {} # Uppercase cog name to method
def register_rpc_handler(self, method):
"""
Registers a method to act as an RPC handler if the internal RPC server is active.
When calling this method through the RPC server, use the naming scheme
"cogname__methodname".
.. important::
All parameters to RPC handler methods must be JSON serializable objects.
The return value of handler methods must also be JSON serializable.
Parameters
----------
method : coroutine
The method to register with the internal RPC server.
"""
self.rpc.add_method(method)
cog_name = method.__self__.__class__.__name__.upper()
if cog_name not in self.rpc_handlers:
self.rpc_handlers[cog_name] = []
self.rpc_handlers[cog_name].append(method)
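    # A minimal registration sketch (hypothetical cog): the coroutine below
    # becomes callable over JSON-RPC as "MYCOG__GET_STATS".
    #
    #   class MyCog(commands.Cog):
    #       def __init__(self, bot):
    #           bot.register_rpc_handler(self.get_stats)
    #
    #       async def get_stats(self):  # args and return value must be JSON serializable
    #           return {"guilds": 42}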
def unregister_rpc_handler(self, method):
"""
Unregisters an RPC method handler.
This will be called automatically for you on cog unload and will pass silently if the
method is not previously registered.
Parameters
----------
method : coroutine
The method to unregister from the internal RPC server.
"""
self.rpc.remove_method(method)
name = get_name(method)
cog_name = name.split("__")[0]
if cog_name in self.rpc_handlers:
try:
self.rpc_handlers[cog_name].remove(method)
except ValueError:
pass | AN-DiscordBot | /AN-DiscordBot-3.9.4.tar.gz/AN-DiscordBot-3.9.4/anbot/core/rpc.py | rpc.py |
import contextlib
import pkgutil
from importlib import import_module, invalidate_caches
from importlib.machinery import ModuleSpec
from pathlib import Path
from typing import Union, List, Optional
import anbot.cogs
from anbot.core.utils import deduplicate_iterables
import discord
from . import checks, commands
from .config import Config
from .i18n import Translator, cog_i18n
from .data_manager import cog_data_path
from .utils.chat_formatting import box, pagify
__all__ = ["CogManager"]
class NoSuchCog(ImportError):
"""Thrown when a cog is missing.
Different from ImportError because some ImportErrors can happen inside cogs.
"""
class CogManager:
"""Directory manager for Red's cogs.
This module allows you to load cogs from multiple directories and even from
outside the bot directory. You may also set a directory for downloader to
install new cogs to, the default being the :code:`cogs/` folder in the root
bot directory.
"""
CORE_PATH = Path(anbot.cogs.__path__[0])
def __init__(self):
self.conf = Config.get_conf(self, 2938473984732, True)
tmp_cog_install_path = cog_data_path(self) / "cogs"
tmp_cog_install_path.mkdir(parents=True, exist_ok=True)
self.conf.register_global(paths=[], install_path=str(tmp_cog_install_path))
async def paths(self) -> List[Path]:
"""Get all currently valid path directories, in order of priority
Returns
-------
List[pathlib.Path]
A list of paths where cog packages can be found. The
install path is highest priority, followed by the
user-defined paths, and the core path has the lowest
priority.
"""
return deduplicate_iterables(
[await self.install_path()], await self.user_defined_paths(), [self.CORE_PATH]
)
async def install_path(self) -> Path:
"""Get the install path for 3rd party cogs.
Returns
-------
pathlib.Path
The path to the directory where 3rd party cogs are stored.
"""
return Path(await self.conf.install_path()).resolve()
async def user_defined_paths(self) -> List[Path]:
"""Get a list of user-defined cog paths.
All paths will be absolute and unique, in order of priority.
Returns
-------
List[pathlib.Path]
A list of user-defined paths.
"""
return list(map(Path, deduplicate_iterables(await self.conf.paths())))
async def set_install_path(self, path: Path) -> Path:
"""Set the install path for 3rd party cogs.
Note
----
The bot will not remember your old cog install path which means
that **all previously installed cogs** will no longer be found.
Parameters
----------
path : pathlib.Path
The new directory for cog installs.
Returns
-------
pathlib.Path
Absolute path to the new install directory.
Raises
------
ValueError
If :code:`path` is not an existing directory.
"""
if not path.is_dir():
raise ValueError("The install path must be an existing directory.")
resolved = path.resolve()
await self.conf.install_path.set(str(resolved))
return resolved
@staticmethod
def _ensure_path_obj(path: Union[Path, str]) -> Path:
"""Guarantee an object will be a path object.
Parameters
----------
path : `pathlib.Path` or `str`
Returns
-------
pathlib.Path
"""
try:
path.exists()
except AttributeError:
path = Path(path)
return path
async def add_path(self, path: Union[Path, str]) -> None:
"""Add a cog path to current list.
This will ignore duplicates.
Parameters
----------
path : `pathlib.Path` or `str`
Path to add.
Raises
------
ValueError
If :code:`path` does not resolve to an existing directory.
"""
path = self._ensure_path_obj(path)
        # Resolve to an absolute path. Note that stored absolute paths may
        # break if the bot install is moved to another OS or computer.
path = path.resolve()
if not path.is_dir():
raise ValueError("'{}' is not a valid directory.".format(path))
if path == await self.install_path():
raise ValueError("Cannot add the install path as an additional path.")
if path == self.CORE_PATH:
raise ValueError("Cannot add the core path as an additional path.")
current_paths = await self.user_defined_paths()
if path not in current_paths:
current_paths.append(path)
await self.set_paths(current_paths)
async def remove_path(self, path: Union[Path, str]) -> None:
"""Remove a path from the current paths list.
Parameters
----------
path : `pathlib.Path` or `str`
Path to remove.
"""
path = self._ensure_path_obj(path).resolve()
paths = await self.user_defined_paths()
paths.remove(path)
await self.set_paths(paths)
async def set_paths(self, paths_: List[Path]):
"""Set the current paths list.
Parameters
----------
paths_ : `list` of `pathlib.Path`
List of paths to set.
"""
str_paths = list(map(str, paths_))
await self.conf.paths.set(str_paths)
async def _find_ext_cog(self, name: str) -> ModuleSpec:
"""
Attempts to find a spec for a third party installed cog.
Parameters
----------
name : str
Name of the cog package to look for.
Returns
-------
importlib.machinery.ModuleSpec
Module spec to be used for cog loading.
Raises
------
NoSuchCog
When no cog with the requested name was found.
"""
real_paths = list(map(str, [await self.install_path()] + await self.user_defined_paths()))
for finder, module_name, _ in pkgutil.iter_modules(real_paths):
if name == module_name:
spec = finder.find_spec(name)
if spec:
return spec
raise NoSuchCog(
"No 3rd party module by the name of '{}' was found in any available path.".format(
name
),
name=name,
)
@staticmethod
async def _find_core_cog(name: str) -> ModuleSpec:
"""
Attempts to find a spec for a core cog.
Parameters
----------
name : str
Returns
-------
importlib.machinery.ModuleSpec
Raises
------
RuntimeError
When no matching spec can be found.
"""
real_name = ".{}".format(name)
package = "anbot.cogs"
try:
mod = import_module(real_name, package=package)
except ImportError as e:
if e.name == package + real_name:
raise NoSuchCog(
"No core cog by the name of '{}' could be found.".format(name),
path=e.path,
name=e.name,
) from e
raise
return mod.__spec__
# noinspection PyUnreachableCode
async def find_cog(self, name: str) -> Optional[ModuleSpec]:
"""Find a cog in the list of available paths.
Parameters
----------
name : str
Name of the cog to find.
Returns
-------
Optional[importlib.machinery.ModuleSpec]
A module spec to be used for specialized cog loading, if found.
"""
with contextlib.suppress(NoSuchCog):
return await self._find_ext_cog(name)
with contextlib.suppress(NoSuchCog):
return await self._find_core_cog(name)
async def available_modules(self) -> List[str]:
"""Finds the names of all available modules to load."""
paths = list(map(str, await self.paths()))
ret = []
for finder, module_name, _ in pkgutil.iter_modules(paths):
ret.append(module_name)
return ret
@staticmethod
def invalidate_caches():
"""Re-evaluate modules in the py cache.
This is an alias for an importlib internal and should be called
any time that a new module has been installed to a cog directory.
"""
invalidate_caches()
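# A minimal lookup sketch: resolve a cog package name to a module spec,
# searching the install path, then user-defined paths, then the core path
# (the directory and cog name below are hypothetical):
#
#   mgr = CogManager()
#   await mgr.add_path("/path/to/my/cogs")
#   spec = await mgr.find_cog("mycog")
#   if spec is None:
#       print("mycog was not found in any registered path")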
_ = Translator("CogManagerUI", __file__)
@cog_i18n(_)
class CogManagerUI(commands.Cog):
"""Commands to interface with Red's cog manager."""
@commands.command()
@checks.is_owner()
async def paths(self, ctx: commands.Context):
"""
Lists current cog paths in order of priority.
"""
cog_mgr = ctx.bot.cog_mgr
install_path = await cog_mgr.install_path()
core_path = cog_mgr.CORE_PATH
cog_paths = await cog_mgr.user_defined_paths()
msg = _("Install Path: {install_path}\nCore Path: {core_path}\n\n").format(
install_path=install_path, core_path=core_path
)
partial = []
for i, p in enumerate(cog_paths, start=1):
partial.append("{}. {}".format(i, p))
msg += "\n".join(partial)
await ctx.send(box(msg))
@commands.command()
@checks.is_owner()
async def addpath(self, ctx: commands.Context, path: Path):
"""
Add a path to the list of available cog paths.
"""
if not path.is_dir():
await ctx.send(_("That path does not exist or does not point to a valid directory."))
return
try:
await ctx.bot.cog_mgr.add_path(path)
except ValueError as e:
await ctx.send(str(e))
else:
await ctx.send(_("Path successfully added."))
@commands.command()
@checks.is_owner()
async def removepath(self, ctx: commands.Context, path_number: int):
"""
Removes a path from the available cog paths given the path_number
from !paths
"""
path_number -= 1
if path_number < 0:
await ctx.send(_("Path numbers must be positive."))
return
cog_paths = await ctx.bot.cog_mgr.user_defined_paths()
try:
to_remove = cog_paths.pop(path_number)
except IndexError:
await ctx.send(_("That is an invalid path number."))
return
await ctx.bot.cog_mgr.remove_path(to_remove)
await ctx.send(_("Path successfully removed."))
@commands.command()
@checks.is_owner()
async def reorderpath(self, ctx: commands.Context, from_: int, to: int):
"""
Reorders paths internally to allow discovery of different cogs.
"""
# Doing this because in the paths command they're 1 indexed
from_ -= 1
to -= 1
if from_ < 0 or to < 0:
await ctx.send(_("Path numbers must be positive."))
return
all_paths = await ctx.bot.cog_mgr.user_defined_paths()
try:
to_move = all_paths.pop(from_)
except IndexError:
await ctx.send(_("Invalid 'from' index."))
return
        # list.insert() clamps out-of-range indices instead of raising
        # IndexError, so validate the target index explicitly.
        if to > len(all_paths):
            await ctx.send(_("Invalid 'to' index."))
            return
        all_paths.insert(to, to_move)
await ctx.bot.cog_mgr.set_paths(all_paths)
await ctx.send(_("Paths reordered."))
@commands.command()
@checks.is_owner()
async def installpath(self, ctx: commands.Context, path: Path = None):
"""
Returns the current install path or sets it if one is provided.
The provided path must be absolute or relative to the bot's
directory and it must already exist.
No installed cogs will be transferred in the process.
"""
if path:
if not path.is_absolute():
path = (ctx.bot.main_dir / path).resolve()
try:
await ctx.bot.cog_mgr.set_install_path(path)
except ValueError:
await ctx.send(_("That path does not exist."))
return
install_path = await ctx.bot.cog_mgr.install_path()
await ctx.send(
_("The bot will install new cogs to the `{}` directory.").format(install_path)
)
@commands.command()
@checks.is_owner()
async def cogs(self, ctx: commands.Context):
"""
Lists all loaded and available cogs.
"""
loaded = set(ctx.bot.extensions.keys())
all_cogs = set(await ctx.bot.cog_mgr.available_modules())
unloaded = all_cogs - loaded
loaded = sorted(list(loaded), key=str.lower)
unloaded = sorted(list(unloaded), key=str.lower)
if await ctx.embed_requested():
loaded = _("**{} loaded:**\n").format(len(loaded)) + ", ".join(loaded)
unloaded = _("**{} unloaded:**\n").format(len(unloaded)) + ", ".join(unloaded)
for page in pagify(loaded, delims=[", ", "\n"], page_length=1800):
e = discord.Embed(description=page, colour=discord.Colour.dark_green())
await ctx.send(embed=e)
for page in pagify(unloaded, delims=[", ", "\n"], page_length=1800):
e = discord.Embed(description=page, colour=discord.Colour.dark_red())
await ctx.send(embed=e)
else:
loaded_count = _("**{} loaded:**\n").format(len(loaded))
loaded = ", ".join(loaded)
unloaded_count = _("**{} unloaded:**\n").format(len(unloaded))
unloaded = ", ".join(unloaded)
loaded_count_sent = False
unloaded_count_sent = False
for page in pagify(loaded, delims=[", ", "\n"], page_length=1800):
if page.startswith(", "):
page = page[2:]
if not loaded_count_sent:
await ctx.send(loaded_count + box(page, lang="css"))
loaded_count_sent = True
else:
await ctx.send(box(page, lang="css"))
for page in pagify(unloaded, delims=[", ", "\n"], page_length=1800):
if page.startswith(", "):
page = page[2:]
if not unloaded_count_sent:
await ctx.send(unloaded_count + box(page, lang="ldif"))
unloaded_count_sent = True
else:
await ctx.send(box(page, lang="ldif")) | AN-DiscordBot | /AN-DiscordBot-3.9.4.tar.gz/AN-DiscordBot-3.9.4/anbot/core/cog_manager.py | cog_manager.py |
import logging
import collections.abc
from copy import deepcopy
from typing import Any, Union, Tuple, Dict, Awaitable, AsyncContextManager, TypeVar, TYPE_CHECKING
import discord
from .data_manager import cog_data_path, core_data_path
from .drivers import get_driver
if TYPE_CHECKING:
from .drivers.red_base import BaseDriver
log = logging.getLogger("an.config")
_T = TypeVar("_T")
class _ValueCtxManager(Awaitable[_T], AsyncContextManager[_T]):
"""Context manager implementation of config values.
This class allows mutable config values to be both "get" and "set" from
within an async context manager.
The context manager can only be used to get and set a mutable data type,
i.e. `dict`s or `list`s. This is because this class's ``raw_value``
attribute must contain a reference to the object being modified within the
context manager.
"""
def __init__(self, value_obj, coro):
self.value_obj = value_obj
self.coro = coro
self.raw_value = None
self.__original_value = None
def __await__(self):
return self.coro.__await__()
async def __aenter__(self):
self.raw_value = await self
if not isinstance(self.raw_value, (list, dict)):
raise TypeError(
"Type of retrieved value must be mutable (i.e. "
"list or dict) in order to use a config value as "
"a context manager."
)
self.__original_value = deepcopy(self.raw_value)
return self.raw_value
async def __aexit__(self, exc_type, exc, tb):
if isinstance(self.raw_value, dict):
raw_value = _str_key_dict(self.raw_value)
else:
raw_value = self.raw_value
if raw_value != self.__original_value:
await self.value_obj.set(self.raw_value)
class Value:
"""A singular "value" of data.
Attributes
----------
identifiers : Tuple[str]
This attribute provides all the keys necessary to get a specific data
element from a json document.
default
The default value for the data element that `identifiers` points at.
    driver : `anbot.core.drivers.red_base.BaseDriver`
A reference to `Config.driver`.
"""
def __init__(self, identifiers: Tuple[str], default_value, driver):
self.identifiers = identifiers
self.default = default_value
self.driver = driver
async def _get(self, default=...):
try:
ret = await self.driver.get(*self.identifiers)
except KeyError:
return default if default is not ... else self.default
return ret
def __call__(self, default=...) -> _ValueCtxManager[Any]:
"""Get the literal value of this data element.
Each `Value` object is created by the `Group.__getattr__` method. The
"real" data of the `Value` object is accessed by this method. It is a
replacement for a :code:`get()` method.
The return value of this method can also be used as an asynchronous
context manager, i.e. with :code:`async with` syntax. This can only be
used on values which are mutable (namely lists and dicts), and will
set the value with its changes on exit of the context manager.
Example
-------
::
foo = await conf.guild(some_guild).foo()
# Is equivalent to this
group_obj = conf.guild(some_guild)
value_obj = conf.foo
foo = await value_obj()
.. important::
This is now, for all intents and purposes, a coroutine.
Parameters
----------
default : `object`, optional
            This argument acts as an override for the registered default
            provided by `default`. The registered default is used when this
            argument is omitted.
Returns
-------
`awaitable` mixed with `asynchronous context manager`
A coroutine object mixed in with an async context manager. When
awaited, this returns the raw data value. When used in :code:`async
            with` syntax, one gets the value on entrance and sets it on exit.
"""
return _ValueCtxManager(self, self._get(default))
async def set(self, value):
"""Set the value of the data elements pointed to by `identifiers`.
Example
-------
::
# Sets global value "foo" to False
await conf.foo.set(False)
# Sets guild specific value of "bar" to True
await conf.guild(some_guild).bar.set(True)
Parameters
----------
value
The new literal value of this attribute.
"""
if isinstance(value, dict):
value = _str_key_dict(value)
await self.driver.set(*self.identifiers, value=value)
async def clear(self):
"""
Clears the value from record for the data element pointed to by `identifiers`.
"""
await self.driver.clear(*self.identifiers)
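# Usage sketch for Value objects (`conf` is a hypothetical Config instance
# with a registered mutable default, e.g. conf.register_global(items=[])):
#
#   items = await conf.items()           # plain read
#   async with conf.items() as items:    # mutate in place; saved on exit
#       items.append("new entry")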
class Group(Value):
"""
Represents a group of data, composed of more `Group` or `Value` objects.
Inherits from `Value` which means that all of the attributes and methods
available in `Value` are also available when working with a `Group` object.
Attributes
----------
defaults : `dict`
All registered default values for this Group.
force_registration : `bool`
Same as `Config.force_registration`.
    driver : `anbot.core.drivers.red_base.BaseDriver`
A reference to `Config.driver`.
"""
def __init__(
self, identifiers: Tuple[str], defaults: dict, driver, force_registration: bool = False
):
self._defaults = defaults
self.force_registration = force_registration
self.driver = driver
super().__init__(identifiers, {}, self.driver)
@property
def defaults(self):
return deepcopy(self._defaults)
async def _get(self, default: Dict[str, Any] = ...) -> Dict[str, Any]:
default = default if default is not ... else self.defaults
raw = await super()._get(default)
if isinstance(raw, dict):
return self.nested_update(raw, default)
else:
return raw
# noinspection PyTypeChecker
def __getattr__(self, item: str) -> Union["Group", Value]:
"""Get an attribute of this group.
This special method is called whenever dot notation is used on this
object.
Parameters
----------
item : str
The name of the attribute being accessed.
Returns
-------
`Group` or `Value`
A child value of this Group. This, of course, can be another
`Group`, due to Config's composite pattern.
Raises
------
AttributeError
If the attribute has not been registered and `force_registration`
is set to :code:`True`.
"""
is_group = self.is_group(item)
is_value = not is_group and self.is_value(item)
new_identifiers = self.identifiers + (item,)
if is_group:
return Group(
identifiers=new_identifiers,
defaults=self._defaults[item],
driver=self.driver,
force_registration=self.force_registration,
)
elif is_value:
return Value(
identifiers=new_identifiers, default_value=self._defaults[item], driver=self.driver
)
elif self.force_registration:
raise AttributeError("'{}' is not a valid registered Group or value.".format(item))
else:
return Value(identifiers=new_identifiers, default_value=None, driver=self.driver)
async def clear_raw(self, *nested_path: Any):
"""
Allows a developer to clear data as if it was stored in a standard
Python dictionary.
For example::
await conf.clear_raw("foo", "bar")
# is equivalent to
data = {"foo": {"bar": None}}
del data["foo"]["bar"]
Parameters
----------
nested_path : Any
Multiple arguments that mirror the arguments passed in for nested
dict access. These are casted to `str` for you.
"""
path = [str(p) for p in nested_path]
await self.driver.clear(*self.identifiers, *path)
def is_group(self, item: Any) -> bool:
"""A helper method for `__getattr__`. Most developers will have no need
to use this.
Parameters
----------
item : Any
See `__getattr__`.
"""
default = self._defaults.get(str(item))
return isinstance(default, dict)
def is_value(self, item: Any) -> bool:
"""A helper method for `__getattr__`. Most developers will have no need
to use this.
Parameters
----------
item : Any
See `__getattr__`.
"""
try:
default = self._defaults[str(item)]
except KeyError:
return False
return not isinstance(default, dict)
def get_attr(self, item: Union[int, str]):
"""Manually get an attribute of this Group.
This is available to use as an alternative to using normal Python
attribute access. It may be required if you find a need for dynamic
attribute access.
Example
-------
A possible use case::
@commands.command()
async def some_command(self, ctx, item: str):
user = ctx.author
# Where the value of item is the name of the data field in Config
await ctx.send(await self.conf.user(user).get_attr(item).foo())
Parameters
----------
item : str
The name of the data field in `Config`. This is casted to
`str` for you.
Returns
-------
`Value` or `Group`
The attribute which was requested.
"""
if isinstance(item, int):
item = str(item)
return self.__getattr__(item)
async def get_raw(self, *nested_path: Any, default=...):
"""
Allows a developer to access data as if it was stored in a standard
Python dictionary.
For example::
d = await conf.get_raw("foo", "bar")
# is equivalent to
data = {"foo": {"bar": "baz"}}
d = data["foo"]["bar"]
Note
----
        If retrieving a sub-group, the return value of this method will
include registered defaults for values which have not yet been set.
Parameters
----------
        nested_path : Any
Multiple arguments that mirror the arguments passed in for nested
dict access. These are casted to `str` for you.
default
Default argument for the value attempting to be accessed. If the
value does not exist the default will be returned.
Returns
-------
Any
The value of the path requested.
Raises
------
KeyError
If the value does not exist yet in Config's internal storage.
"""
path = [str(p) for p in nested_path]
if default is ...:
poss_default = self.defaults
for ident in path:
try:
poss_default = poss_default[ident]
except KeyError:
break
else:
default = poss_default
try:
raw = await self.driver.get(*self.identifiers, *path)
except KeyError:
if default is not ...:
return default
raise
else:
if isinstance(default, dict):
return self.nested_update(raw, default)
return raw
def all(self) -> _ValueCtxManager[Dict[str, Any]]:
"""Get a dictionary representation of this group's data.
The return value of this method can also be used as an asynchronous
context manager, i.e. with :code:`async with` syntax.
Note
----
The return value of this method will include registered defaults for
values which have not yet been set.
Returns
-------
dict
All of this Group's attributes, resolved as raw data values.
"""
return self()
def nested_update(
        self, current: collections.abc.Mapping, defaults: Dict[str, Any] = ...
) -> Dict[str, Any]:
"""Robust updater for nested dictionaries
If no defaults are passed, then the instance attribute 'defaults'
will be used.
"""
if defaults is ...:
defaults = self.defaults
for key, value in current.items():
            if isinstance(value, collections.abc.Mapping):
result = self.nested_update(value, defaults.get(key, {}))
defaults[key] = result
else:
defaults[key] = deepcopy(current[key])
return defaults
async def set(self, value):
if not isinstance(value, dict):
raise ValueError("You may only set the value of a group to be a dict.")
await super().set(value)
async def set_raw(self, *nested_path: Any, value):
"""
Allows a developer to set data as if it was stored in a standard
Python dictionary.
For example::
await conf.set_raw("foo", "bar", value="baz")
# is equivalent to
data = {"foo": {"bar": None}}
data["foo"]["bar"] = "baz"
Parameters
----------
nested_path : Any
Multiple arguments that mirror the arguments passed in for nested
`dict` access. These are casted to `str` for you.
value
The value to store.
"""
path = [str(p) for p in nested_path]
if isinstance(value, dict):
value = _str_key_dict(value)
await self.driver.set(*self.identifiers, *path, value=value)
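# Raw-access sketch for Group objects (the keys are hypothetical): get_raw and
# set_raw mirror plain nested-dict access while persisting through the driver:
#
#   await conf.guild(guild).set_raw("prefixes", "primary", value="!")
#   primary = await conf.guild(guild).get_raw("prefixes", "primary", default="!")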
class Config:
"""Configuration manager for cogs and AN.
You should always use `get_conf` or to instantiate a Config object. Use
`get_core_conf` for Config used in the core package.
.. important::
Most config data should be accessed through its respective
group method (e.g. :py:meth:`guild`) however the process for
accessing global data is a bit different. There is no
:python:`global` method because global data is accessed by
normal attribute access::
await conf.foo()
Attributes
----------
cog_name : `str`
The name of the cog that has requested a `Config` object.
unique_identifier : `int`
Unique identifier provided to differentiate cog data when name
conflicts occur.
driver
        An instance of a driver that implements `anbot.core.drivers.red_base.BaseDriver`.
force_registration : `bool`
Determines if Config should throw an error if a cog attempts to access
an attribute which has not been previously registered.
Note
----
**You should use this.** By enabling force registration you give Config
the ability to alert you instantly if you've made a typo when
attempting to access data.
"""
GLOBAL = "GLOBAL"
GUILD = "GUILD"
CHANNEL = "TEXTCHANNEL"
ROLE = "ROLE"
USER = "USER"
MEMBER = "MEMBER"
def __init__(
self,
cog_name: str,
unique_identifier: str,
driver: "BaseDriver",
force_registration: bool = False,
defaults: dict = None,
):
self.cog_name = cog_name
self.unique_identifier = unique_identifier
self.driver = driver
self.force_registration = force_registration
self._defaults = defaults or {}
@property
def defaults(self):
return deepcopy(self._defaults)
@classmethod
def get_conf(cls, cog_instance, identifier: int, force_registration=False, cog_name=None):
"""Get a Config instance for your cog.
.. warning::
If you are using this classmethod to get a second instance of an
existing Config object for a particular cog, you MUST provide the
correct identifier. If you do not, you *will* screw up all other
Config instances for that cog.
Parameters
----------
cog_instance
This is an instance of your cog after it has been instantiated. If
you're calling this method from within your cog's :code:`__init__`,
this is just :code:`self`.
identifier : int
A (hard-coded) random integer, used to keep your data distinct from
any other cog with the same name.
force_registration : `bool`, optional
Should config require registration of data keys before allowing you
to get/set values? See `force_registration`.
cog_name : str, optional
            Config normally uses ``cog_instance`` to determine the name of your cog.
If you wish you may pass ``None`` to ``cog_instance`` and directly specify
the name of your cog here.
Returns
-------
Config
A new Config object.
"""
if cog_instance is None and cog_name is not None:
cog_path_override = cog_data_path(raw_name=cog_name)
else:
cog_path_override = cog_data_path(cog_instance=cog_instance)
cog_name = cog_path_override.stem
uuid = str(hash(identifier))
# We have to import this here otherwise we have a circular dependency
from .data_manager import basic_config
log.debug("Basic config: \n\n{}".format(basic_config))
driver_name = basic_config.get("STORAGE_TYPE", "JSON")
driver_details = basic_config.get("STORAGE_DETAILS", {})
log.debug("Using driver: '{}'".format(driver_name))
driver = get_driver(
driver_name, cog_name, uuid, data_path_override=cog_path_override, **driver_details
)
conf = cls(
cog_name=cog_name,
unique_identifier=uuid,
force_registration=force_registration,
driver=driver,
)
return conf
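    # A typical cog-side sketch (the identifier is an arbitrary hard-coded int):
    #
    #   class MyCog(commands.Cog):
    #       def __init__(self):
    #           self.conf = Config.get_conf(
    #               self, identifier=1234567890, force_registration=True
    #           )
    #           self.conf.register_guild(enabled=False)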
@classmethod
def get_core_conf(cls, force_registration: bool = False):
"""Get a Config instance for a core module.
All core modules that require a config instance should use this
classmethod instead of `get_conf`.
Parameters
----------
force_registration : `bool`, optional
See `force_registration`.
"""
core_path = core_data_path()
# We have to import this here otherwise we have a circular dependency
from .data_manager import basic_config
driver_name = basic_config.get("STORAGE_TYPE", "JSON")
driver_details = basic_config.get("STORAGE_DETAILS", {})
driver = get_driver(
driver_name, "Core", "0", data_path_override=core_path, **driver_details
)
conf = cls(
cog_name="Core",
driver=driver,
unique_identifier="0",
force_registration=force_registration,
)
return conf
def __getattr__(self, item: str) -> Union[Group, Value]:
"""Same as `group.__getattr__` except for global data.
Parameters
----------
item : str
The attribute you want to get.
Returns
-------
`Group` or `Value`
The value for the attribute you want to retrieve
Raises
------
AttributeError
If there is no global attribute by the given name and
`force_registration` is set to :code:`True`.
"""
global_group = self._get_base_group(self.GLOBAL)
return getattr(global_group, item)
@staticmethod
def _get_defaults_dict(key: str, value) -> dict:
"""
Since we're allowing nested config stuff now, not storing the
_defaults as a flat dict sounds like a good idea. May turn out
to be an awful one but we'll see.
"""
ret = {}
partial = ret
splitted = key.split("__")
for i, k in enumerate(splitted, start=1):
if not k.isidentifier():
raise RuntimeError("'{}' is an invalid config key.".format(k))
if i == len(splitted):
partial[k] = value
else:
partial[k] = {}
partial = partial[k]
return ret
@staticmethod
def _update_defaults(to_add: Dict[str, Any], _partial: Dict[str, Any]):
"""
This tries to update the _defaults dictionary with the nested
partial dict generated by _get_defaults_dict. This WILL
throw an error if you try to have both a value and a group
registered under the same name.
"""
for k, v in to_add.items():
val_is_dict = isinstance(v, dict)
if k in _partial:
existing_is_dict = isinstance(_partial[k], dict)
if val_is_dict != existing_is_dict:
# != is XOR
raise KeyError("You cannot register a Group and a Value under the same name.")
if val_is_dict:
Config._update_defaults(v, _partial=_partial[k])
else:
_partial[k] = v
else:
_partial[k] = v
def _register_default(self, key: str, **kwargs: Any):
if key not in self._defaults:
self._defaults[key] = {}
data = deepcopy(kwargs)
for k, v in data.items():
to_add = self._get_defaults_dict(k, v)
self._update_defaults(to_add, self._defaults[key])
def register_global(self, **kwargs):
"""Register default values for attributes you wish to store in `Config`
at a global level.
Examples
--------
You can register a single value or multiple values::
conf.register_global(
foo=True
)
conf.register_global(
bar=False,
baz=None
)
You can also now register nested values::
_defaults = {
"foo": {
"bar": True,
"baz": False
}
}
# Will register `foo.bar` == True and `foo.baz` == False
conf.register_global(
**_defaults
)
You can do the same thing without a :python:`_defaults` dict by
using double underscore as a variable name separator::
# This is equivalent to the previous example
conf.register_global(
foo__bar=True,
foo__baz=False
)
"""
self._register_default(self.GLOBAL, **kwargs)
def register_guild(self, **kwargs):
"""Register default values on a per-guild level.
See `register_global` for more details.
"""
self._register_default(self.GUILD, **kwargs)
def register_channel(self, **kwargs):
"""Register default values on a per-channel level.
See `register_global` for more details.
"""
# We may need to add a voice channel category later
self._register_default(self.CHANNEL, **kwargs)
def register_role(self, **kwargs):
"""Registers default values on a per-role level.
See `register_global` for more details.
"""
self._register_default(self.ROLE, **kwargs)
def register_user(self, **kwargs):
"""Registers default values on a per-user level.
This means that each user's data is guild-independent.
See `register_global` for more details.
"""
self._register_default(self.USER, **kwargs)
def register_member(self, **kwargs):
"""Registers default values on a per-member level.
This means that each user's data is guild-dependent.
See `register_global` for more details.
"""
self._register_default(self.MEMBER, **kwargs)
def register_custom(self, group_identifier: str, **kwargs):
"""Registers default values for a custom group.
See `register_global` for more details.
"""
self._register_default(group_identifier, **kwargs)
def _get_base_group(self, key: str, *identifiers: str) -> Group:
# noinspection PyTypeChecker
return Group(
identifiers=(key, *identifiers),
defaults=self.defaults.get(key, {}),
driver=self.driver,
force_registration=self.force_registration,
)
def guild(self, guild: discord.Guild) -> Group:
"""Returns a `Group` for the given guild.
Parameters
----------
guild : discord.Guild
A guild object.
Returns
-------
        `Group <anbot.core.config.Group>`
The guild's Group object.
"""
return self._get_base_group(self.GUILD, str(guild.id))
def channel(self, channel: discord.TextChannel) -> Group:
"""Returns a `Group` for the given channel.
This does not discriminate between text and voice channels.
Parameters
----------
channel : `discord.abc.GuildChannel`
A channel object.
Returns
-------
        `Group <anbot.core.config.Group>`
The channel's Group object.
"""
return self._get_base_group(self.CHANNEL, str(channel.id))
def role(self, role: discord.Role) -> Group:
"""Returns a `Group` for the given role.
Parameters
----------
role : discord.Role
A role object.
Returns
-------
        `Group <anbot.core.config.Group>`
The role's Group object.
"""
return self._get_base_group(self.ROLE, str(role.id))
def user(self, user: discord.abc.User) -> Group:
"""Returns a `Group` for the given user.
Parameters
----------
user : discord.User
A user object.
Returns
-------
        `Group <anbot.core.config.Group>`
The user's Group object.
"""
return self._get_base_group(self.USER, str(user.id))
def member(self, member: discord.Member) -> Group:
"""Returns a `Group` for the given member.
Parameters
----------
member : discord.Member
A member object.
Returns
-------
        `Group <anbot.core.config.Group>`
The member's Group object.
"""
return self._get_base_group(self.MEMBER, str(member.guild.id), str(member.id))
def custom(self, group_identifier: str, *identifiers: str):
"""Returns a `Group` for the given custom group.
Parameters
----------
group_identifier : str
Used to identify the custom group.
identifiers : str
The attributes necessary to uniquely identify an entry in the
custom group. These are casted to `str` for you.
Returns
-------
        `Group <anbot.core.config.Group>`
The custom group's Group object.
"""
return self._get_base_group(str(group_identifier), *map(str, identifiers))
async def _all_from_scope(self, scope: str) -> Dict[int, Dict[Any, Any]]:
"""Get a dict of all values from a particular scope of data.
:code:`scope` must be one of the constants attributed to
this class, i.e. :code:`GUILD`, :code:`MEMBER` et cetera.
IDs as keys in the returned dict are casted to `int` for convenience.
Default values are also mixed into the data if they have not yet been
overwritten.
"""
group = self._get_base_group(scope)
ret = {}
try:
dict_ = await self.driver.get(*group.identifiers)
except KeyError:
pass
else:
for k, v in dict_.items():
data = group.defaults
data.update(v)
ret[int(k)] = data
return ret
async def all_guilds(self) -> dict:
"""Get all guild data as a dict.
Note
----
The return value of this method will include registered defaults for
values which have not yet been set.
Returns
-------
dict
A dictionary in the form {`int`: `dict`} mapping
:code:`GUILD_ID -> data`.
"""
return await self._all_from_scope(self.GUILD)
async def all_channels(self) -> dict:
"""Get all channel data as a dict.
Note
----
The return value of this method will include registered defaults for
values which have not yet been set.
Returns
-------
dict
A dictionary in the form {`int`: `dict`} mapping
:code:`CHANNEL_ID -> data`.
"""
return await self._all_from_scope(self.CHANNEL)
async def all_roles(self) -> dict:
"""Get all role data as a dict.
Note
----
The return value of this method will include registered defaults for
values which have not yet been set.
Returns
-------
dict
A dictionary in the form {`int`: `dict`} mapping
:code:`ROLE_ID -> data`.
"""
return await self._all_from_scope(self.ROLE)
async def all_users(self) -> dict:
"""Get all user data as a dict.
Note
----
The return value of this method will include registered defaults for
values which have not yet been set.
Returns
-------
dict
A dictionary in the form {`int`: `dict`} mapping
:code:`USER_ID -> data`.
"""
return await self._all_from_scope(self.USER)
@staticmethod
def _all_members_from_guild(group: Group, guild_data: dict) -> dict:
ret = {}
for member_id, member_data in guild_data.items():
new_member_data = group.defaults
new_member_data.update(member_data)
ret[int(member_id)] = new_member_data
return ret
async def all_members(self, guild: discord.Guild = None) -> dict:
"""Get data for all members.
If :code:`guild` is specified, only the data for the members of that
guild will be returned. As such, the dict will map
:code:`MEMBER_ID -> data`. Otherwise, the dict maps
:code:`GUILD_ID -> MEMBER_ID -> data`.
Note
----
The return value of this method will include registered defaults for
values which have not yet been set.
Parameters
----------
guild : `discord.Guild`, optional
The guild to get the member data from. Can be omitted if data
from every member of all guilds is desired.
Returns
-------
dict
A dictionary of all specified member data.
"""
ret = {}
if guild is None:
group = self._get_base_group(self.MEMBER)
try:
dict_ = await self.driver.get(*group.identifiers)
except KeyError:
pass
else:
for guild_id, guild_data in dict_.items():
ret[int(guild_id)] = self._all_members_from_guild(group, guild_data)
else:
group = self._get_base_group(self.MEMBER, str(guild.id))
try:
guild_data = await self.driver.get(*group.identifiers)
except KeyError:
pass
else:
ret = self._all_members_from_guild(group, guild_data)
return ret
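    # Iteration sketch: walking the stored data of every member of one guild
    # (keys are cast to int for you):
    #
    #   all_data = await conf.all_members(guild)
    #   for member_id, data in all_data.items():
    #       ...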
async def _clear_scope(self, *scopes: str):
"""Clear all data in a particular scope.
The only situation where a second scope should be passed in is if
member data from a specific guild is being cleared.
If no scopes are passed, then all data is cleared from every scope.
Parameters
----------
*scopes : str, optional
The scope of the data. Generally only one scope needs to be
provided, a second only necessary for clearing member data
of a specific guild.
**Leaving blank removes all data from this Config instance.**
"""
if not scopes:
# noinspection PyTypeChecker
group = Group(identifiers=(), defaults={}, driver=self.driver)
else:
group = self._get_base_group(*scopes)
await group.clear()
async def clear_all(self):
"""Clear all data from this Config instance.
This resets all data to its registered defaults.
.. important::
This cannot be undone.
"""
await self._clear_scope()
async def clear_all_globals(self):
"""Clear all global data.
This resets all global data to its registered defaults.
"""
await self._clear_scope(self.GLOBAL)
async def clear_all_guilds(self):
"""Clear all guild data.
This resets all guild data to its registered defaults.
"""
await self._clear_scope(self.GUILD)
async def clear_all_channels(self):
"""Clear all channel data.
This resets all channel data to its registered defaults.
"""
await self._clear_scope(self.CHANNEL)
async def clear_all_roles(self):
"""Clear all role data.
This resets all role data to its registered defaults.
"""
await self._clear_scope(self.ROLE)
async def clear_all_users(self):
"""Clear all user data.
This resets all user data to its registered defaults.
"""
await self._clear_scope(self.USER)
async def clear_all_members(self, guild: discord.Guild = None):
"""Clear all member data.
This resets all specified member data to its registered defaults.
Parameters
----------
guild : `discord.Guild`, optional
The guild to clear member data from. Omit to clear member data from
all guilds.
"""
if guild is not None:
await self._clear_scope(self.MEMBER, str(guild.id))
return
await self._clear_scope(self.MEMBER)
async def clear_all_custom(self, group_identifier: str):
"""Clear all custom group data.
This resets all custom group data to its registered defaults.
Parameters
----------
group_identifier : str
The identifier for the custom group. This is casted to
`str` for you.
"""
await self._clear_scope(str(group_identifier))
def _str_key_dict(value: Dict[Any, _T]) -> Dict[str, _T]:
"""
Recursively casts all keys in the given `dict` to `str`.
Parameters
----------
value : Dict[Any, Any]
The `dict` to cast keys to `str`.
Returns
-------
Dict[str, Any]
        The `dict` with keys (and nested keys) cast to `str`.
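
    Examples
    --------
    A small illustration:

    >>> _str_key_dict({1: {2: "a"}, "b": 3})
    {'1': {'2': 'a'}, 'b': 3}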
"""
ret = {}
for k, v in value.items():
if isinstance(v, dict):
v = _str_key_dict(v)
ret[str(k)] = v
return ret | AN-DiscordBot | /AN-DiscordBot-3.9.4.tar.gz/AN-DiscordBot-3.9.4/anbot/core/config.py | config.py |
import contextlib
from collections import namedtuple
from typing import List, Optional, Union
import discord
from discord.ext.commands import formatter as dpy_formatter
import inspect
import itertools
import re
from . import commands
from .i18n import Translator
from .utils.chat_formatting import pagify
from .utils import fuzzy_command_search, format_fuzzy_results
_ = Translator("Help", __file__)
EMPTY_STRING = "\u200b"
_mentions_transforms = {"@everyone": "@\u200beveryone", "@here": "@\u200bhere"}
_mention_pattern = re.compile("|".join(_mentions_transforms.keys()))
EmbedField = namedtuple("EmbedField", "name value inline")
class Help(dpy_formatter.HelpFormatter):
"""Formats help for commands."""
def __init__(self, *args, **kwargs):
self.context = None
self.command = None
super().__init__(*args, **kwargs)
@staticmethod
def pm_check(ctx):
return isinstance(ctx.channel, discord.DMChannel)
@property
def me(self):
return self.context.me
@property
def bot_all_commands(self):
return self.context.bot.all_commands
@property
def avatar(self):
return self.context.bot.user.avatar_url_as(format="png")
async def color(self):
if self.pm_check(self.context):
return self.context.bot.color
else:
return await self.context.embed_colour()
colour = color
@property
def destination(self):
if self.context.bot.pm_help:
return self.context.author
return self.context
    # Additional helper properties
@property
def author(self):
# Get author dict with username if PM and display name in guild
if self.pm_check(self.context):
name = self.context.bot.user.name
else:
            name = self.me.display_name or self.context.bot.user.name
author = {"name": "{0} Help Manual".format(name), "icon_url": self.avatar}
return author
def _add_subcommands(self, cmds):
entries = ""
for name, command in cmds:
if name in command.aliases:
# skip aliases
continue
if self.is_cog() or self.is_bot():
name = "{0}{1}".format(self.context.clean_prefix, name)
entries += "**{0}** {1}\n".format(name, command.short_doc)
return entries
def get_ending_note(self):
# command_name = self.context.invoked_with
return (
"Type {0}help <command> for more info on a command. "
"You can also type {0}help <category> for more info on a category.".format(
self.context.clean_prefix
)
)
async def format(self) -> dict:
"""Formats command for output.
Returns a dict used to build embed"""
emb = {"embed": {"title": "", "description": ""}, "footer": {"text": ""}, "fields": []}
if self.is_cog():
translator = getattr(self.command, "__translator__", lambda s: s)
description = (
inspect.cleandoc(translator(self.command.__doc__))
if self.command.__doc__
else EMPTY_STRING
)
else:
description = self.command.description
        if description:
description = "*{0}*".format(description)
if description:
# <description> portion
emb["embed"]["description"] = description[:2046]
tagline = await self.context.bot.db.help.tagline()
if tagline:
footer = tagline
else:
footer = self.get_ending_note()
emb["footer"]["text"] = footer
if isinstance(self.command, discord.ext.commands.core.Command):
# <signature portion>
emb["embed"]["title"] = emb["embed"]["description"]
emb["embed"]["description"] = "`Syntax: {0}`".format(self.get_command_signature())
# <long doc> section
if self.command.help:
splitted = self.command.help.split("\n\n")
name = "__{0}__".format(splitted[0])
value = "\n\n".join(splitted[1:]).replace("[p]", self.context.clean_prefix)
if value == "":
value = EMPTY_STRING
field = EmbedField(name[:252], value[:1024], False)
emb["fields"].append(field)
# end it here if it's just a regular command
if not self.has_subcommands():
return emb
def category(tup):
            # Get the cog (category) name from a (name, command) tuple
cog = tup[1].cog_name
return "**__{0}:__**".format(cog) if cog is not None else "**__\u200bNo Category:__**"
# Get subcommands for bot or category
filtered = await self.filter_command_list()
if self.is_bot():
# Get list of non-hidden commands for bot.
data = sorted(filtered, key=category)
for category, commands_ in itertools.groupby(data, key=category):
commands_ = sorted(commands_)
if len(commands_) > 0:
for i, page in enumerate(
pagify(self._add_subcommands(commands_), page_length=1000)
):
title = category if i < 1 else f"{category} (continued)"
field = EmbedField(title, page, False)
emb["fields"].append(field)
else:
# Get list of commands for category
filtered = sorted(filtered)
if filtered:
for i, page in enumerate(
pagify(self._add_subcommands(filtered), page_length=1000)
):
title = (
"**__Commands:__**"
if not self.is_bot() and self.is_cog()
else "**__Subcommands:__**"
)
if i > 0:
title += " (continued)"
field = EmbedField(title, page, False)
emb["fields"].append(field)
return emb
@staticmethod
def group_fields(fields: List[EmbedField], max_chars=1000):
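        """Batch fields into sublists, closing a sublist once the combined
        length of its field values exceeds ``max_chars``."""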
curr_group = []
ret = []
for f in fields:
curr_group.append(f)
if sum(len(f.value) for f in curr_group) > max_chars:
ret.append(curr_group)
curr_group = []
if len(curr_group) > 0:
ret.append(curr_group)
return ret
async def format_help_for(self, ctx, command_or_bot, reason: str = ""):
"""Formats the help page and handles the actual heavy lifting of how
the help command looks like. To change the behaviour, override the
:meth:`~.HelpFormatter.format` method.
Parameters
-----------
ctx: :class:`.Context`
The context of the invoked help command.
command_or_bot: :class:`.Command` or :class:`.Bot`
The bot or command that we are getting the help of.
        reason : str
            Optional text which, if given, is used as the title of the
            resulting help embed.
Returns
--------
list
A paginated output of the help command.
"""
self.context = ctx
self.command = command_or_bot
# We want the permission state to be set as if the author had run the command he is
# requesting help for. This is so the subcommands shown in the help menu correctly reflect
# any permission rules set.
if isinstance(self.command, commands.Command):
with contextlib.suppress(commands.CommandError):
await self.command.can_run(
self.context, check_all_parents=True, change_permission_state=True
)
elif isinstance(self.command, commands.Cog):
with contextlib.suppress(commands.CommandError):
# Cog's don't have a `can_run` method, so we use the `Requires` object directly.
await self.command.requires.verify(self.context)
emb = await self.format()
if reason:
emb["embed"]["title"] = reason
ret = []
page_char_limit = await ctx.bot.db.help.page_char_limit()
field_groups = self.group_fields(emb["fields"], page_char_limit)
for i, group in enumerate(field_groups, 1):
embed = discord.Embed(color=await self.color(), **emb["embed"])
if len(field_groups) > 1:
description = "{} *- Page {} of {}*".format(
embed.description, i, len(field_groups)
)
embed.description = description
embed.set_author(**self.author)
for field in group:
embed.add_field(**field._asdict())
embed.set_footer(**emb["footer"])
ret.append(embed)
return ret
async def format_command_not_found(
self, ctx: commands.Context, command_name: str
) -> Optional[Union[str, discord.Message]]:
"""Get the response for a user calling help on a missing command."""
self.context = ctx
return await default_command_not_found(
ctx,
command_name,
use_embeds=True,
colour=await self.colour(),
author=self.author,
footer={"text": self.get_ending_note()},
)
@commands.command(hidden=True)
async def help(ctx: commands.Context, *, command_name: str = ""):
"""Show help documentation.
- `[p]help`: Show the help manual.
- `[p]help command`: Show help for a command.
    - `[p]help Category`: Show commands and descriptions for a category.
"""
bot = ctx.bot
if bot.pm_help:
destination = ctx.author
else:
destination = ctx.channel
use_embeds = await ctx.embed_requested()
if use_embeds:
formatter = bot.formatter
else:
formatter = dpy_formatter.HelpFormatter()
if not command_name:
# help by itself just lists our own commands.
pages = await formatter.format_help_for(ctx, bot)
else:
# First check if it's a cog
command = bot.get_cog(command_name)
if command is None:
command = bot.get_command(command_name)
if command is None:
if hasattr(formatter, "format_command_not_found"):
msg = await formatter.format_command_not_found(ctx, command_name)
else:
msg = await default_command_not_found(ctx, command_name, use_embeds=use_embeds)
pages = [msg]
else:
pages = await formatter.format_help_for(ctx, command)
max_pages_in_guild = await ctx.bot.db.help.max_pages_in_guild()
if len(pages) > max_pages_in_guild:
destination = ctx.author
if ctx.guild and not ctx.guild.me.permissions_in(ctx.channel).send_messages:
destination = ctx.author
try:
for page in pages:
if isinstance(page, discord.Embed):
await destination.send(embed=page)
else:
await destination.send(page)
except discord.Forbidden:
await ctx.channel.send(
_(
"I couldn't send the help message to you in DM. Either you blocked me or you "
"disabled DMs in this server."
)
)
async def default_command_not_found(
ctx: commands.Context, command_name: str, *, use_embeds: bool, **embed_options
) -> Optional[Union[str, discord.Embed]]:
"""Default function for formatting the response to a missing command."""
ret = None
cmds = command_name.split()
prev_command = None
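    # Walk progressively longer prefixes of the input ("a", "a b", "a b c", ...)
    # to find the deepest existing parent command.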
for invoked in itertools.accumulate(cmds, lambda *args: " ".join(args)):
command = ctx.bot.get_command(invoked)
if command is None:
if prev_command is not None and not isinstance(prev_command, commands.Group):
ret = _("Command *{command_name}* has no subcommands.").format(
command_name=prev_command.qualified_name
)
break
elif not await command.can_see(ctx):
return
prev_command = command
if ret is None:
fuzzy_commands = await fuzzy_command_search(ctx, command_name, min_score=75)
if fuzzy_commands:
ret = await format_fuzzy_results(ctx, fuzzy_commands, embed=use_embeds)
else:
ret = _("Command *{command_name}* not found.").format(command_name=command_name)
if use_embeds:
if isinstance(ret, str):
ret = discord.Embed(title=ret)
if "colour" in embed_options:
ret.colour = embed_options.pop("colour")
elif "color" in embed_options:
ret.colour = embed_options.pop("color")
if "author" in embed_options:
ret.set_author(**embed_options.pop("author"))
if "footer" in embed_options:
ret.set_footer(**embed_options.pop("footer"))
return ret | AN-DiscordBot | /AN-DiscordBot-3.9.4.tar.gz/AN-DiscordBot-3.9.4/anbot/core/help_formatter.py | help_formatter.py |
import asyncio
import contextlib
import datetime
import importlib
import itertools
import json
import logging
import os
import sys
import tarfile
import traceback
from collections import namedtuple
from pathlib import Path
from random import SystemRandom
from string import ascii_letters, digits
from typing import TYPE_CHECKING, Union, Tuple, List, Optional, Iterable, Sequence, Dict
import aiohttp
import discord
import pkg_resources
from anbot.core import (
__version__,
version_info as an_version_info,
VersionInfo,
checks,
commands,
errors,
i18n,
)
from .utils.predicates import MessagePredicate
from .utils.chat_formatting import pagify, box, inline
if TYPE_CHECKING:
from anbot.core.bot import AN
__all__ = ["Core"]
log = logging.getLogger("an")
_ = i18n.Translator("Core", __file__)
OWNER_DISCLAIMER = (
"⚠ **Only** the person who is hosting AN should be "
"owner. **This has SERIOUS security implications. The "
"owner can access any data that is present on the host "
"system.** ⚠"
)
class CoreLogic:
def __init__(self, bot: "AN"):
self.bot = bot
self.bot.register_rpc_handler(self._load)
self.bot.register_rpc_handler(self._unload)
self.bot.register_rpc_handler(self._reload)
self.bot.register_rpc_handler(self._name)
self.bot.register_rpc_handler(self._prefixes)
self.bot.register_rpc_handler(self._version_info)
self.bot.register_rpc_handler(self._invite_url)
async def _load(
self, cog_names: Iterable[str]
) -> Tuple[List[str], List[str], List[str], List[str]]:
"""
Loads cogs by name.
Parameters
----------
cog_names : list of str
Returns
-------
tuple
4-tuple of loaded, failed, not found and already loaded cogs.
"""
failed_packages = []
loaded_packages = []
notfound_packages = []
alreadyloaded_packages = []
bot = self.bot
cogspecs = []
for name in cog_names:
try:
spec = await bot.cog_mgr.find_cog(name)
if spec:
cogspecs.append((spec, name))
else:
notfound_packages.append(name)
except Exception as e:
log.exception("Package import failed", exc_info=e)
exception_log = "Exception during import of cog\n"
exception_log += "".join(traceback.format_exception(type(e), e, e.__traceback__))
bot._last_exception = exception_log
failed_packages.append(name)
for spec, name in cogspecs:
try:
self._cleanup_and_refresh_modules(spec.name)
await bot.load_extension(spec)
except errors.PackageAlreadyLoaded:
alreadyloaded_packages.append(name)
except Exception as e:
log.exception("Package loading failed", exc_info=e)
exception_log = "Exception during loading of cog\n"
exception_log += "".join(traceback.format_exception(type(e), e, e.__traceback__))
bot._last_exception = exception_log
failed_packages.append(name)
else:
await bot.add_loaded_package(name)
loaded_packages.append(name)
return loaded_packages, failed_packages, notfound_packages, alreadyloaded_packages
@staticmethod
def _cleanup_and_refresh_modules(module_name: str) -> None:
"""Interally reloads modules so that changes are detected"""
splitted = module_name.split(".")
def maybe_reload(new_name):
try:
lib = sys.modules[new_name]
except KeyError:
pass
else:
importlib._bootstrap._exec(lib.__spec__, lib)
# noinspection PyTypeChecker
modules = itertools.accumulate(splitted, "{}.{}".format)
for m in modules:
maybe_reload(m)
children = {name: lib for name, lib in sys.modules.items() if name.startswith(module_name)}
for child_name, lib in children.items():
importlib._bootstrap._exec(lib.__spec__, lib)
@staticmethod
def _get_package_strings(
packages: List[str], fmt: str, other: Optional[Tuple[str, ...]] = None
) -> str:
"""
Gets the strings needed for the load, unload and reload commands
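
        Example
        -------
        An illustrative call (package names are hypothetical)::

            _get_package_strings(
                ["foo", "bar"], "Package{plural} {packs} {other} loaded.", ("was", "were")
            )
            # -> "Packages `foo` and `bar` were loaded."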
"""
packages = [inline(name) for name in packages]
if other is None:
other = ("", "")
plural = "s" if len(packages) > 1 else ""
use_and, other = ("", other[0]) if len(packages) == 1 else (" and ", other[1])
packages_string = ", ".join(packages[:-1]) + use_and + packages[-1]
form = {"plural": plural, "packs": packages_string, "other": other}
final_string = fmt.format(**form)
return final_string
async def _unload(self, cog_names: Iterable[str]) -> Tuple[List[str], List[str]]:
"""
Unloads cogs with the given names.
Parameters
----------
cog_names : list of str
Returns
-------
tuple
2 element tuple of successful unloads and failed unloads.
"""
failed_packages = []
unloaded_packages = []
bot = self.bot
for name in cog_names:
if name in bot.extensions:
bot.unload_extension(name)
await bot.remove_loaded_package(name)
unloaded_packages.append(name)
else:
failed_packages.append(name)
return unloaded_packages, failed_packages
async def _reload(
self, cog_names: Sequence[str]
) -> Tuple[List[str], List[str], List[str], List[str]]:
await self._unload(cog_names)
loaded, load_failed, not_found, already_loaded = await self._load(cog_names)
return loaded, load_failed, not_found, already_loaded
async def _name(self, name: Optional[str] = None) -> str:
"""
Gets or sets the bot's username.
Parameters
----------
name : str
            If passed, the bot will change its username.
Returns
-------
str
The current (or new) username of the bot.
"""
if name is not None:
await self.bot.user.edit(username=name)
return self.bot.user.name
async def _prefixes(self, prefixes: Optional[Sequence[str]] = None) -> List[str]:
"""
Gets or sets the bot's global prefixes.
Parameters
----------
prefixes : list of str
            If passed, the bot will set its global prefixes.
Returns
-------
list of str
The current (or new) list of prefixes.
"""
if prefixes:
prefixes = sorted(prefixes, reverse=True)
await self.bot.db.prefix.set(prefixes)
return await self.bot.db.prefix()
@classmethod
async def _version_info(cls) -> Dict[str, str]:
"""
Version information for AN and discord.py
Returns
-------
dict
`anbot` and `discordpy` keys containing version information for both.
"""
return {"anbot": __version__, "discordpy": discord.__version__}
async def _invite_url(self) -> str:
"""
Generates the invite URL for the bot.
Returns
-------
str
Invite URL.
"""
app_info = await self.bot.application_info()
return discord.utils.oauth_url(app_info.id)
@i18n.cog_i18n(_)
class Core(commands.Cog, CoreLogic):
"""Commands related to core functions"""
@commands.command(hidden=True)
async def ping(self, ctx: commands.Context):
"""Pong."""
await ctx.send("Pong.")
@commands.command()
async def info(self, ctx: commands.Context):
"""Shows info about AN"""
author_repo = "https://github.com/Twentysix26"
org_repo = "https://github.com/Cog-Creators"
red_repo = org_repo + "/AN-DiscordBot"
red_pypi = "https://pypi.python.org/pypi/AN-DiscordBot"
support_server_url = "https://discord.gg/red"
dpy_repo = "https://github.com/Rapptz/discord.py"
python_url = "https://www.python.org/"
since = datetime.datetime(2016, 1, 2, 0, 0)
days_since = (datetime.datetime.utcnow() - since).days
dpy_version = "[{}]({})".format(discord.__version__, dpy_repo)
python_version = "[{}.{}.{}]({})".format(*sys.version_info[:3], python_url)
red_version = "[{}]({})".format(__version__, red_pypi)
app_info = await self.bot.application_info()
owner = app_info.owner
async with aiohttp.ClientSession() as session:
async with session.get("{}/json".format(red_pypi)) as r:
data = await r.json()
outdated = VersionInfo.from_str(data["info"]["version"]) > an_version_info
about = (
"This is an instance of [AN, an open source Discord bot]({}) "
"created by [Twentysix]({}) and [improved by many]({}).\n\n"
"AN is backed by a passionate community who contributes and "
"creates content for everyone to enjoy. [Join us today]({}) "
"and help us improve!\n\n"
"".format(red_repo, author_repo, org_repo, support_server_url)
)
embed = discord.Embed(color=(await ctx.embed_colour()))
embed.add_field(name="Instance owned by", value=str(owner))
embed.add_field(name="Python", value=python_version)
embed.add_field(name="discord.py", value=dpy_version)
embed.add_field(name="AN version", value=red_version)
if outdated:
embed.add_field(
name="Outdated", value="Yes, {} is available".format(data["info"]["version"])
)
embed.add_field(name="About AN", value=about, inline=False)
embed.set_footer(
text="Bringing joy since 02 Jan 2016 (over {} days ago!)".format(days_since)
)
try:
await ctx.send(embed=embed)
except discord.HTTPException:
await ctx.send("I need the `Embed links` permission to send this")
@commands.command()
async def uptime(self, ctx: commands.Context):
"""Shows AN's uptime"""
since = ctx.bot.uptime.strftime("%Y-%m-%d %H:%M:%S")
passed = self.get_bot_uptime()
await ctx.send("Been up for: **{}** (since {} UTC)".format(passed, since))
def get_bot_uptime(self, *, brief: bool = False):
# Courtesy of Danny
now = datetime.datetime.utcnow()
delta = now - self.bot.uptime
hours, remainder = divmod(int(delta.total_seconds()), 3600)
minutes, seconds = divmod(remainder, 60)
days, hours = divmod(hours, 24)
if not brief:
if days:
fmt = "{d} days, {h} hours, {m} minutes, and {s} seconds"
else:
fmt = "{h} hours, {m} minutes, and {s} seconds"
else:
fmt = "{h}h {m}m {s}s"
if days:
fmt = "{d}d " + fmt
return fmt.format(d=days, h=hours, m=minutes, s=seconds)
@commands.group()
async def embedset(self, ctx: commands.Context):
"""
Commands for toggling embeds on or off.
This setting determines whether or not to
use embeds as a response to a command (for
commands that support it). The default is to
use embeds.
"""
if ctx.invoked_subcommand is None:
text = "Embed settings:\n\n"
global_default = await self.bot.db.embeds()
text += "Global default: {}\n".format(global_default)
if ctx.guild:
guild_setting = await self.bot.db.guild(ctx.guild).embeds()
text += "Guild setting: {}\n".format(guild_setting)
user_setting = await self.bot.db.user(ctx.author).embeds()
text += "User setting: {}".format(user_setting)
await ctx.send(box(text))
@embedset.command(name="global")
@checks.is_owner()
async def embedset_global(self, ctx: commands.Context):
"""
Toggle the global embed setting.
This is used as a fallback if the user
or guild hasn't set a preference. The
default is to use embeds.
"""
current = await self.bot.db.embeds()
await self.bot.db.embeds.set(not current)
await ctx.send(
"Embeds are now {} by default.".format("disabled" if current else "enabled")
)
@embedset.command(name="guild")
@checks.guildowner_or_permissions(administrator=True)
@commands.guild_only()
async def embedset_guild(self, ctx: commands.Context, enabled: bool = None):
"""
Toggle the guild's embed setting.
If enabled is None, the setting will be unset and
the global default will be used instead.
If set, this is used instead of the global default
to determine whether or not to use embeds. This is
used for all commands done in a guild channel except
for help commands.
"""
await self.bot.db.guild(ctx.guild).embeds.set(enabled)
if enabled is None:
await ctx.send("Embeds will now fall back to the global setting.")
else:
await ctx.send(
"Embeds are now {} for this guild.".format("enabled" if enabled else "disabled")
)
@embedset.command(name="user")
async def embedset_user(self, ctx: commands.Context, enabled: bool = None):
"""
Toggle the user's embed setting.
If enabled is None, the setting will be unset and
the global default will be used instead.
If set, this is used instead of the global default
to determine whether or not to use embeds. This is
used for all commands done in a DM with the bot, as
well as all help commands everywhere.
"""
await self.bot.db.user(ctx.author).embeds.set(enabled)
if enabled is None:
await ctx.send("Embeds will now fall back to the global setting.")
else:
await ctx.send(
"Embeds are now {} for you.".format("enabled" if enabled else "disabled")
)
@commands.command()
@checks.is_owner()
async def traceback(self, ctx: commands.Context, public: bool = False):
"""Sends to the owner the last command exception that has occurred
If public (yes is specified), it will be sent to the chat instead"""
if not public:
destination = ctx.author
else:
destination = ctx.channel
if self.bot._last_exception:
for page in pagify(self.bot._last_exception, shorten_by=10):
await destination.send(box(page, lang="py"))
else:
await ctx.send("No exception has occurred yet")
@commands.command()
@checks.is_owner()
async def invite(self, ctx: commands.Context):
"""Show's AN's invite url"""
await ctx.author.send(await self._invite_url())
@commands.command()
@commands.guild_only()
@checks.is_owner()
async def leave(self, ctx: commands.Context):
"""Leaves server"""
await ctx.send("Are you sure you want me to leave this server? (y/n)")
pred = MessagePredicate.yes_or_no(ctx)
try:
await self.bot.wait_for("message", check=pred)
except asyncio.TimeoutError:
await ctx.send("Response timed out.")
return
else:
if pred.result is True:
await ctx.send("Alright. Bye :wave:")
log.debug("Leaving guild '{}'".format(ctx.guild.name))
await ctx.guild.leave()
else:
await ctx.send("Alright, I'll stay then :)")
@commands.command()
@checks.is_owner()
async def servers(self, ctx: commands.Context):
"""Lists and allows to leave servers"""
guilds = sorted(list(self.bot.guilds), key=lambda s: s.name.lower())
msg = ""
responses = []
for i, server in enumerate(guilds, 1):
msg += "{}: {}\n".format(i, server.name)
responses.append(str(i))
for page in pagify(msg, ["\n"]):
await ctx.send(page)
query = await ctx.send("To leave a server, just type its number.")
pred = MessagePredicate.contained_in(responses, ctx)
try:
await self.bot.wait_for("message", check=pred, timeout=15)
except asyncio.TimeoutError:
try:
await query.delete()
except discord.errors.NotFound:
pass
else:
await self.leave_confirmation(guilds[pred.result], ctx)
async def leave_confirmation(self, guild, ctx):
if guild.owner.id == ctx.bot.user.id:
await ctx.send("I cannot leave a guild I am the owner of.")
return
await ctx.send("Are you sure you want me to leave {}? (yes/no)".format(guild.name))
pred = MessagePredicate.yes_or_no(ctx)
try:
await self.bot.wait_for("message", check=pred, timeout=15)
if pred.result is True:
await guild.leave()
if guild != ctx.guild:
await ctx.send("Done.")
else:
await ctx.send("Alright then.")
except asyncio.TimeoutError:
await ctx.send("Response timed out.")
@commands.command()
@checks.is_owner()
async def load(self, ctx: commands.Context, *cogs: str):
"""Loads packages"""
if not cogs:
return await ctx.send_help()
async with ctx.typing():
loaded, failed, not_found, already_loaded = await self._load(cogs)
if loaded:
fmt = "Loaded {packs}."
formed = self._get_package_strings(loaded, fmt)
await ctx.send(formed)
if already_loaded:
fmt = "The package{plural} {packs} {other} already loaded."
formed = self._get_package_strings(already_loaded, fmt, ("is", "are"))
await ctx.send(formed)
if failed:
fmt = (
"Failed to load package{plural} {packs}. Check your console or "
"logs for details."
)
formed = self._get_package_strings(failed, fmt)
await ctx.send(formed)
if not_found:
fmt = "The package{plural} {packs} {other} not found in any cog path."
formed = self._get_package_strings(not_found, fmt, ("was", "were"))
await ctx.send(formed)
@commands.command()
@checks.is_owner()
async def unload(self, ctx: commands.Context, *cogs: str):
"""Unloads packages"""
if not cogs:
return await ctx.send_help()
unloaded, failed = await self._unload(cogs)
if unloaded:
fmt = "Package{plural} {packs} {other} unloaded."
formed = self._get_package_strings(unloaded, fmt, ("was", "were"))
await ctx.send(formed)
if failed:
fmt = "The package{plural} {packs} {other} not loaded."
formed = self._get_package_strings(failed, fmt, ("is", "are"))
await ctx.send(formed)
@commands.command(name="reload")
@checks.is_owner()
async def reload(self, ctx: commands.Context, *cogs: str):
"""Reloads packages"""
if not cogs:
return await ctx.send_help()
async with ctx.typing():
loaded, failed, not_found, already_loaded = await self._reload(cogs)
if loaded:
fmt = "Package{plural} {packs} {other} reloaded."
formed = self._get_package_strings(loaded, fmt, ("was", "were"))
await ctx.send(formed)
if failed:
fmt = "Failed to reload package{plural} {packs}. Check your logs for details"
formed = self._get_package_strings(failed, fmt)
await ctx.send(formed)
if not_found:
fmt = "The package{plural} {packs} {other} not found in any cog path."
formed = self._get_package_strings(not_found, fmt, ("was", "were"))
await ctx.send(formed)
@commands.command(name="shutdown")
@checks.is_owner()
async def _shutdown(self, ctx: commands.Context, silently: bool = False):
"""Shuts down the bot"""
wave = "\N{WAVING HAND SIGN}"
skin = "\N{EMOJI MODIFIER FITZPATRICK TYPE-3}"
with contextlib.suppress(discord.HTTPException):
if not silently:
await ctx.send("Shutting down... " + wave + skin)
await ctx.bot.shutdown()
@commands.command(name="restart")
@checks.is_owner()
async def _restart(self, ctx: commands.Context, silently: bool = False):
"""Attempts to restart AN
Makes AN quit with exit code 26
The restart is not guaranteed: it must be dealt
with by the process manager in use"""
with contextlib.suppress(discord.HTTPException):
if not silently:
await ctx.send("Restarting...")
await ctx.bot.shutdown(restart=True)
@commands.group(name="set")
async def _set(self, ctx: commands.Context):
"""Changes AN's settings"""
if ctx.invoked_subcommand is None:
if ctx.guild:
guild = ctx.guild
admin_role = (
guild.get_role(await ctx.bot.db.guild(ctx.guild).admin_role()) or "Not set"
)
mod_role = (
guild.get_role(await ctx.bot.db.guild(ctx.guild).mod_role()) or "Not set"
)
prefixes = await ctx.bot.db.guild(ctx.guild).prefix()
guild_settings = f"Admin role: {admin_role}\nMod role: {mod_role}\n"
else:
guild_settings = ""
prefixes = None # This is correct. The below can happen in a guild.
if not prefixes:
prefixes = await ctx.bot.db.prefix()
locale = await ctx.bot.db.locale()
prefix_string = " ".join(prefixes)
settings = (
f"{ctx.bot.user.name} Settings:\n\n"
f"Prefixes: {prefix_string}\n"
f"{guild_settings}"
f"Locale: {locale}"
)
await ctx.send(box(settings))
@_set.command()
@checks.guildowner()
@commands.guild_only()
async def adminrole(self, ctx: commands.Context, *, role: discord.Role):
"""Sets the admin role for this server"""
await ctx.bot.db.guild(ctx.guild).admin_role.set(role.id)
await ctx.send("The admin role for this guild has been set.")
@_set.command()
@checks.guildowner()
@commands.guild_only()
async def modrole(self, ctx: commands.Context, *, role: discord.Role):
"""Sets the mod role for this server"""
await ctx.bot.db.guild(ctx.guild).mod_role.set(role.id)
await ctx.send("The mod role for this guild has been set.")
@_set.command(aliases=["usebotcolor"])
@checks.guildowner()
@commands.guild_only()
async def usebotcolour(self, ctx: commands.Context):
"""
Toggle whether to use the bot owner-configured colour for embeds.
Default is to not use the bot's configured colour, in which case the
colour used will be the colour of the bot's top role.
"""
current_setting = await ctx.bot.db.guild(ctx.guild).use_bot_color()
await ctx.bot.db.guild(ctx.guild).use_bot_color.set(not current_setting)
await ctx.send(
"The bot {} use its configured color for embeds.".format(
"will not" if current_setting else "will"
)
)
@_set.command()
@checks.guildowner()
@commands.guild_only()
async def serverfuzzy(self, ctx: commands.Context):
"""
Toggle whether to enable fuzzy command search for the server.
Default is for fuzzy command search to be disabled.
"""
current_setting = await ctx.bot.db.guild(ctx.guild).fuzzy()
await ctx.bot.db.guild(ctx.guild).fuzzy.set(not current_setting)
await ctx.send(
"Fuzzy command search has been {} for this server.".format(
"disabled" if current_setting else "enabled"
)
)
@_set.command()
@checks.is_owner()
async def fuzzy(self, ctx: commands.Context):
"""
Toggle whether to enable fuzzy command search in DMs.
Default is for fuzzy command search to be disabled.
"""
current_setting = await ctx.bot.db.fuzzy()
await ctx.bot.db.fuzzy.set(not current_setting)
await ctx.send(
"Fuzzy command search has been {} in DMs.".format(
"disabled" if current_setting else "enabled"
)
)
@_set.command(aliases=["color"])
@checks.is_owner()
async def colour(self, ctx: commands.Context, *, colour: discord.Colour = None):
"""
Sets a default colour to be used for the bot's embeds.
Acceptable values for the colour parameter can be found at:
http://discordpy.readthedocs.io/en/rewrite/ext/commands/api.html#discord.ext.commands.ColourConverter
"""
if colour is None:
ctx.bot.color = discord.Color.red()
await ctx.bot.db.color.set(discord.Color.red().value)
return await ctx.send("The color has been reset.")
ctx.bot.color = colour
await ctx.bot.db.color.set(colour.value)
await ctx.send("The color has been set.")
@_set.command()
@checks.is_owner()
async def avatar(self, ctx: commands.Context, url: str):
"""Sets AN's avatar"""
async with aiohttp.ClientSession() as session:
async with session.get(url) as r:
data = await r.read()
try:
await ctx.bot.user.edit(avatar=data)
except discord.HTTPException:
await ctx.send(
"Failed. Remember that you can edit my avatar "
"up to two times a hour. The URL must be a "
"direct link to a JPG / PNG."
)
except discord.InvalidArgument:
await ctx.send("JPG / PNG format only.")
else:
await ctx.send("Done.")
@_set.command(name="game")
@checks.bot_in_a_guild()
@checks.is_owner()
async def _game(self, ctx: commands.Context, *, game: str = None):
"""Sets AN's playing status"""
if game:
game = discord.Game(name=game)
else:
game = None
status = ctx.bot.guilds[0].me.status if len(ctx.bot.guilds) > 0 else discord.Status.online
await ctx.bot.change_presence(status=status, activity=game)
await ctx.send("Game set.")
@_set.command(name="listening")
@checks.bot_in_a_guild()
@checks.is_owner()
async def _listening(self, ctx: commands.Context, *, listening: str = None):
"""Sets AN's listening status"""
status = ctx.bot.guilds[0].me.status if len(ctx.bot.guilds) > 0 else discord.Status.online
if listening:
activity = discord.Activity(name=listening, type=discord.ActivityType.listening)
else:
activity = None
await ctx.bot.change_presence(status=status, activity=activity)
await ctx.send("Listening set.")
@_set.command(name="watching")
@checks.bot_in_a_guild()
@checks.is_owner()
async def _watching(self, ctx: commands.Context, *, watching: str = None):
"""Sets AN's watching status"""
status = ctx.bot.guilds[0].me.status if len(ctx.bot.guilds) > 0 else discord.Status.online
if watching:
activity = discord.Activity(name=watching, type=discord.ActivityType.watching)
else:
activity = None
await ctx.bot.change_presence(status=status, activity=activity)
await ctx.send("Watching set.")
@_set.command()
@checks.bot_in_a_guild()
@checks.is_owner()
async def status(self, ctx: commands.Context, *, status: str):
"""Sets AN's status
Available statuses:
online
idle
dnd
invisible
"""
statuses = {
"online": discord.Status.online,
"idle": discord.Status.idle,
"dnd": discord.Status.dnd,
"invisible": discord.Status.invisible,
}
game = ctx.bot.guilds[0].me.activity if len(ctx.bot.guilds) > 0 else None
try:
status = statuses[status.lower()]
except KeyError:
await ctx.send_help()
else:
await ctx.bot.change_presence(status=status, activity=game)
await ctx.send("Status changed to {}.".format(status))
@_set.command()
@checks.bot_in_a_guild()
@checks.is_owner()
async def stream(self, ctx: commands.Context, streamer=None, *, stream_title=None):
"""Sets AN's streaming status
Leaving both streamer and stream_title empty will clear it."""
status = ctx.bot.guilds[0].me.status if len(ctx.bot.guilds) > 0 else None
if stream_title:
stream_title = stream_title.strip()
if "twitch.tv/" not in streamer:
streamer = "https://www.twitch.tv/" + streamer
activity = discord.Streaming(url=streamer, name=stream_title)
await ctx.bot.change_presence(status=status, activity=activity)
elif streamer is not None:
await ctx.send_help()
return
else:
await ctx.bot.change_presence(activity=None, status=status)
await ctx.send("Done.")
@_set.command(name="username", aliases=["name"])
@checks.is_owner()
async def _username(self, ctx: commands.Context, *, username: str):
"""Sets AN's username"""
try:
await self._name(name=username)
except discord.HTTPException:
await ctx.send(
_(
"Failed to change name. Remember that you can "
"only do it up to 2 times an hour. Use "
"nicknames if you need frequent changes. "
"`{}set nickname`"
).format(ctx.prefix)
)
else:
await ctx.send("Done.")
@_set.command(name="nickname")
@checks.admin()
@commands.guild_only()
async def _nickname(self, ctx: commands.Context, *, nickname: str = None):
"""Sets AN's nickname"""
try:
await ctx.guild.me.edit(nick=nickname)
except discord.Forbidden:
await ctx.send("I lack the permissions to change my own nickname.")
else:
await ctx.send("Done.")
@_set.command(aliases=["prefixes"])
@checks.is_owner()
async def prefix(self, ctx: commands.Context, *prefixes: str):
"""Sets AN's global prefix(es)"""
if not prefixes:
await ctx.send_help()
return
await self._prefixes(prefixes)
await ctx.send("Prefix set.")
@_set.command(aliases=["serverprefixes"])
@checks.admin()
@commands.guild_only()
async def serverprefix(self, ctx: commands.Context, *prefixes: str):
"""Sets AN's server prefix(es)"""
if not prefixes:
await ctx.bot.db.guild(ctx.guild).prefix.set([])
await ctx.send("Guild prefixes have been reset.")
return
prefixes = sorted(prefixes, reverse=True)
await ctx.bot.db.guild(ctx.guild).prefix.set(prefixes)
await ctx.send("Prefix set.")
@_set.command()
@commands.cooldown(1, 60 * 10, commands.BucketType.default)
async def owner(self, ctx: commands.Context):
"""Sets AN's main owner"""
# According to the Python docs this is suitable for cryptographic use
random = SystemRandom()
length = random.randint(25, 35)
chars = ascii_letters + digits
token = ""
for i in range(length):
token += random.choice(chars)
log.info("{0} ({0.id}) requested to be set as owner.".format(ctx.author))
print("\nVerification token:")
print(token)
await ctx.send("Remember:\n" + OWNER_DISCLAIMER)
await asyncio.sleep(5)
await ctx.send(
"I have printed a one-time token in the console. "
"Copy and paste it here to confirm you are the owner."
)
try:
message = await ctx.bot.wait_for(
"message", check=MessagePredicate.same_context(ctx), timeout=60
)
except asyncio.TimeoutError:
self.owner.reset_cooldown(ctx)
await ctx.send(
"The `{prefix}set owner` request has timed out.".format(prefix=ctx.prefix)
)
else:
if message.content.strip() == token:
self.owner.reset_cooldown(ctx)
await ctx.bot.db.owner.set(ctx.author.id)
ctx.bot.owner_id = ctx.author.id
await ctx.send("You have been set as owner.")
else:
await ctx.send("Invalid token.")
@_set.command()
@checks.is_owner()
async def token(self, ctx: commands.Context, token: str):
"""Change bot token."""
if not isinstance(ctx.channel, discord.DMChannel):
try:
await ctx.message.delete()
except discord.Forbidden:
pass
await ctx.send(
"Please use that command in DM. Since users probably saw your token,"
" it is recommended to reset it right now. Go to the following link and"
" select `Reveal Token` and `Generate a new token?`."
"\n\nhttps://discordapp.com/developers/applications/me/{}".format(self.bot.user.id)
)
return
await ctx.bot.db.token.set(token)
await ctx.send("Token set. Restart me.")
@_set.command()
@checks.is_owner()
async def locale(self, ctx: commands.Context, locale_name: str):
"""
Changes bot locale.
Use [p]listlocales to get a list of available locales.
To reset to English, use "en-US".
"""
i18n.set_locale(locale_name)
await ctx.bot.db.locale.set(locale_name)
await ctx.send("Locale has been set.")
@_set.command()
@checks.is_owner()
async def sentry(self, ctx: commands.Context, on_or_off: bool):
"""Enable or disable Sentry logging.
Sentry is the service AN uses to manage error reporting. This should
be disabled if you have made your own modifications to the anbot
package.
"""
await ctx.bot.db.enable_sentry.set(on_or_off)
if on_or_off:
ctx.bot.enable_sentry()
await ctx.send("Done. Sentry logging is now enabled.")
else:
ctx.bot.disable_sentry()
await ctx.send("Done. Sentry logging is now disabled.")
@commands.group()
@checks.is_owner()
async def helpset(self, ctx: commands.Context):
"""Manage settings for the help command."""
pass
@helpset.command(name="pagecharlimit")
    async def helpset_pagecharlimit(self, ctx: commands.Context, limit: int):
"""Set the character limit for each page in the help message.
This setting only applies to embedded help.
        Please note that setting a relatively small character limit may
mean some pages will exceed this limit. This is because categories
are never spread across multiple pages in the help message.
The default value is 1000 characters.
"""
if limit <= 0:
await ctx.send("You must give a positive value!")
return
await ctx.bot.db.help.page_char_limit.set(limit)
await ctx.send("Done. The character limit per page has been set to {}.".format(limit))
@helpset.command(name="maxpages")
async def helpset_maxpages(self, ctx: commands.Context, pages: int):
"""Set the maximum number of help pages sent in a server channel.
This setting only applies to embedded help.
If a help message contains more pages than this value, the help message will
be sent to the command author via DM. This is to help reduce spam in server
text channels.
The default value is 2 pages.
"""
if pages < 0:
await ctx.send("You must give a value of zero or greater!")
return
await ctx.bot.db.help.max_pages_in_guild.set(pages)
await ctx.send("Done. The page limit has been set to {}.".format(pages))
@helpset.command(name="tagline")
async def helpset_tagline(self, ctx: commands.Context, *, tagline: str = None):
"""
Set the tagline to be used.
This setting only applies to embedded help. If no tagline is
specified, the default will be used instead.
"""
if tagline is None:
await ctx.bot.db.help.tagline.set("")
return await ctx.send("The tagline has been reset.")
if len(tagline) > 2048:
await ctx.send(
"Your tagline is too long! Please shorten it to be "
"no more than 2048 characters long."
)
return
await ctx.bot.db.help.tagline.set(tagline)
await ctx.send("The tagline has been set to {}.".format(tagline[:1900]))
@commands.command()
@checks.is_owner()
async def listlocales(self, ctx: commands.Context):
"""
Lists all available locales
Use `[p]set locale` to set a locale
"""
async with ctx.channel.typing():
red_dist = pkg_resources.get_distribution("red-discordbot")
red_path = Path(red_dist.location) / "anbot"
            locale_list = sorted({loc.stem for loc in red_path.glob("**/*.po")})
if not locale_list:
await ctx.send("No languages found.")
return
pages = pagify("\n".join(locale_list), shorten_by=26)
await ctx.send_interactive(pages, box_lang="Available Locales:")
@commands.command()
@checks.is_owner()
async def backup(self, ctx: commands.Context, *, backup_path: str = None):
"""Creates a backup of all data for the instance."""
from anbot.core.data_manager import basic_config, instance_name
from anbot.core.drivers.red_json import JSON
data_dir = Path(basic_config["DATA_PATH"])
if basic_config["STORAGE_TYPE"] == "MongoDB":
from anbot.core.drivers.red_mongo import Mongo
m = Mongo("Core", "0", **basic_config["STORAGE_DETAILS"])
db = m.db
collection_names = await db.list_collection_names()
for c_name in collection_names:
if c_name == "Core":
c_data_path = data_dir / basic_config["CORE_PATH_APPEND"]
else:
c_data_path = data_dir / basic_config["COG_PATH_APPEND"] / c_name
docs = await db[c_name].find().to_list(None)
for item in docs:
item_id = str(item.pop("_id"))
output = item
target = JSON(c_name, item_id, data_path_override=c_data_path)
await target.jsonIO._threadsafe_save_json(output)
backup_filename = "redv3-{}-{}.tar.gz".format(
instance_name, ctx.message.created_at.strftime("%Y-%m-%d %H-%M-%S")
)
if data_dir.exists():
if not backup_path:
                backup_pth = Path.home()
else:
backup_pth = Path(backup_path)
backup_file = backup_pth / backup_filename
to_backup = []
exclusions = [
"__pycache__",
"Lavalink.jar",
os.path.join("Downloader", "lib"),
os.path.join("CogManager", "cogs"),
os.path.join("RepoManager", "repos"),
]
downloader_cog = ctx.bot.get_cog("Downloader")
if downloader_cog and hasattr(downloader_cog, "_repo_manager"):
repo_output = []
repo_mgr = downloader_cog._repo_manager
for repo in repo_mgr._repos.values():
repo_output.append({"url": repo.url, "name": repo.name, "branch": repo.branch})
repo_filename = data_dir / "cogs" / "RepoManager" / "repos.json"
with open(str(repo_filename), "w") as f:
f.write(json.dumps(repo_output, indent=4))
instance_data = {instance_name: basic_config}
instance_file = data_dir / "instance.json"
with open(str(instance_file), "w") as instance_out:
instance_out.write(json.dumps(instance_data, indent=4))
for f in data_dir.glob("**/*"):
if not any(ex in str(f) for ex in exclusions):
to_backup.append(f)
with tarfile.open(str(backup_file), "w:gz") as tar:
for f in to_backup:
tar.add(str(f), recursive=False)
print(str(backup_file))
await ctx.send(
"A backup has been made of this instance. It is at {}.".format(backup_file)
)
if backup_file.stat().st_size > 8_000_000:
await ctx.send("This backup is to large to send via DM.")
return
await ctx.send("Would you like to receive a copy via DM? (y/n)")
pred = MessagePredicate.yes_or_no(ctx)
try:
await ctx.bot.wait_for("message", check=pred, timeout=60)
except asyncio.TimeoutError:
await ctx.send("Response timed out.")
else:
if pred.result is True:
await ctx.send("OK, it's on its way!")
try:
async with ctx.author.typing():
await ctx.author.send(
_("Here's a copy of the backup"),
file=discord.File(str(backup_file)),
)
except discord.Forbidden:
await ctx.send(
_("I don't seem to be able to DM you. Do you have closed DMs?")
)
except discord.HTTPException:
await ctx.send(_("I could not send the backup file."))
else:
await ctx.send(_("OK then."))
else:
await ctx.send(_("That directory doesn't seem to exist..."))
@commands.command()
@commands.cooldown(1, 60, commands.BucketType.user)
async def contact(self, ctx: commands.Context, *, message: str):
"""Sends a message to the owner"""
guild = ctx.message.guild
owner = discord.utils.get(ctx.bot.get_all_members(), id=ctx.bot.owner_id)
author = ctx.message.author
footer = _("User ID: {}").format(author.id)
if ctx.guild is None:
source = _("through DM")
else:
source = _("from {}").format(guild)
footer += _(" | Server ID: {}").format(guild.id)
# We need to grab the DM command prefix (global)
# Since it can also be set through cli flags, bot.db is not a reliable
# source. So we'll just mock a DM message instead.
fake_message = namedtuple("Message", "guild")
prefixes = await ctx.bot.command_prefix(ctx.bot, fake_message(guild=None))
prefix = prefixes[0]
content = _("Use `{}dm {} <text>` to reply to this user").format(prefix, author.id)
description = _("Sent by {} {}").format(author, source)
if isinstance(author, discord.Member):
colour = author.colour
else:
colour = discord.Colour.red()
if await ctx.embed_requested():
e = discord.Embed(colour=colour, description=message)
if author.avatar_url:
e.set_author(name=description, icon_url=author.avatar_url)
else:
e.set_author(name=description)
e.set_footer(text=footer)
try:
await owner.send(content, embed=e)
except discord.InvalidArgument:
await ctx.send(
_("I cannot send your message, I'm unable to find my owner... *sigh*")
)
except discord.HTTPException:
await ctx.send(_("I'm unable to deliver your message. Sorry."))
else:
await ctx.send(_("Your message has been sent."))
else:
msg_text = "{}\nMessage:\n\n{}\n{}".format(description, message, footer)
try:
await owner.send("{}\n{}".format(content, box(msg_text)))
except discord.InvalidArgument:
await ctx.send(
_("I cannot send your message, I'm unable to find my owner... *sigh*")
)
except discord.HTTPException:
await ctx.send(_("I'm unable to deliver your message. Sorry."))
else:
await ctx.send(_("Your message has been sent."))
@commands.command()
@checks.is_owner()
async def dm(self, ctx: commands.Context, user_id: int, *, message: str):
"""Sends a DM to a user
        This command needs a user ID to work.
        To get a user ID, enable 'developer mode' in Discord's
        settings, 'appearance' tab. Then right click a user
        and copy their ID."""
destination = discord.utils.get(ctx.bot.get_all_members(), id=user_id)
if destination is None:
await ctx.send(
_(
"Invalid ID or user not found. You can only "
"send messages to people I share a server "
"with."
)
)
return
fake_message = namedtuple("Message", "guild")
prefixes = await ctx.bot.command_prefix(ctx.bot, fake_message(guild=None))
prefix = prefixes[0]
description = _("Owner of {}").format(ctx.bot.user)
content = _("You can reply to this message with {}contact").format(prefix)
if await ctx.embed_requested():
e = discord.Embed(colour=discord.Colour.red(), description=message)
e.set_footer(text=content)
if ctx.bot.user.avatar_url:
e.set_author(name=description, icon_url=ctx.bot.user.avatar_url)
else:
e.set_author(name=description)
try:
await destination.send(embed=e)
except discord.HTTPException:
await ctx.send(
_("Sorry, I couldn't deliver your message to {}").format(destination)
)
else:
await ctx.send(_("Message delivered to {}").format(destination))
else:
response = "{}\nMessage:\n\n{}".format(description, message)
try:
await destination.send("{}\n{}".format(box(response), content))
except discord.HTTPException:
await ctx.send(
_("Sorry, I couldn't deliver your message to {}").format(destination)
)
else:
await ctx.send(_("Message delivered to {}").format(destination))
@commands.group()
@checks.is_owner()
async def whitelist(self, ctx: commands.Context):
"""
Whitelist management commands.
"""
pass
@whitelist.command(name="add")
async def whitelist_add(self, ctx, user: discord.User):
"""
Adds a user to the whitelist.
"""
async with ctx.bot.db.whitelist() as curr_list:
if user.id not in curr_list:
curr_list.append(user.id)
await ctx.send(_("User added to whitelist."))
@whitelist.command(name="list")
async def whitelist_list(self, ctx: commands.Context):
"""
Lists whitelisted users.
"""
curr_list = await ctx.bot.db.whitelist()
msg = _("Whitelisted Users:")
for user in curr_list:
msg += "\n\t- {}".format(user)
for page in pagify(msg):
await ctx.send(box(page))
@whitelist.command(name="remove")
async def whitelist_remove(self, ctx: commands.Context, *, user: discord.User):
"""
Removes user from whitelist.
"""
removed = False
async with ctx.bot.db.whitelist() as curr_list:
if user.id in curr_list:
removed = True
curr_list.remove(user.id)
if removed:
await ctx.send(_("User has been removed from whitelist."))
else:
await ctx.send(_("User was not in the whitelist."))
@whitelist.command(name="clear")
async def whitelist_clear(self, ctx: commands.Context):
"""
Clears the whitelist.
"""
await ctx.bot.db.whitelist.set([])
await ctx.send(_("Whitelist has been cleared."))
@commands.group()
@checks.is_owner()
async def blacklist(self, ctx: commands.Context):
"""
        Blacklist management commands.
"""
pass
@blacklist.command(name="add")
async def blacklist_add(self, ctx: commands.Context, *, user: discord.User):
"""
Adds a user to the blacklist.
"""
if await ctx.bot.is_owner(user):
await ctx.send(_("You cannot blacklist an owner!"))
return
async with ctx.bot.db.blacklist() as curr_list:
if user.id not in curr_list:
curr_list.append(user.id)
await ctx.send(_("User added to blacklist."))
@blacklist.command(name="list")
async def blacklist_list(self, ctx: commands.Context):
"""
Lists blacklisted users.
"""
curr_list = await ctx.bot.db.blacklist()
msg = _("blacklisted Users:")
for user in curr_list:
msg += "\n\t- {}".format(user)
for page in pagify(msg):
await ctx.send(box(page))
@blacklist.command(name="remove")
async def blacklist_remove(self, ctx: commands.Context, *, user: discord.User):
"""
Removes user from blacklist.
"""
removed = False
async with ctx.bot.db.blacklist() as curr_list:
if user.id in curr_list:
removed = True
curr_list.remove(user.id)
if removed:
await ctx.send(_("User has been removed from blacklist."))
else:
await ctx.send(_("User was not in the blacklist."))
@blacklist.command(name="clear")
async def blacklist_clear(self, ctx: commands.Context):
"""
Clears the blacklist.
"""
await ctx.bot.db.blacklist.set([])
await ctx.send(_("blacklist has been cleared."))
@commands.group()
@commands.guild_only()
@checks.admin_or_permissions(administrator=True)
async def localwhitelist(self, ctx: commands.Context):
"""
Whitelist management commands.
"""
pass
@localwhitelist.command(name="add")
async def localwhitelist_add(
self, ctx: commands.Context, *, user_or_role: Union[discord.Member, discord.Role]
):
"""
Adds a user or role to the whitelist.
"""
        user = isinstance(user_or_role, discord.Member)
        async with ctx.bot.db.guild(ctx.guild).whitelist() as curr_list:
            if user_or_role.id not in curr_list:
                curr_list.append(user_or_role.id)
if user:
await ctx.send(_("User added to whitelist."))
else:
await ctx.send(_("Role added to whitelist."))
@localwhitelist.command(name="list")
async def localwhitelist_list(self, ctx: commands.Context):
"""
Lists whitelisted users and roles.
"""
curr_list = await ctx.bot.db.guild(ctx.guild).whitelist()
msg = _("Whitelisted Users and roles:")
for obj in curr_list:
msg += "\n\t- {}".format(obj)
for page in pagify(msg):
await ctx.send(box(page))
@localwhitelist.command(name="remove")
async def localwhitelist_remove(
self, ctx: commands.Context, *, user_or_role: Union[discord.Member, discord.Role]
):
"""
Removes user or role from whitelist.
"""
        user = isinstance(user_or_role, discord.Member)
        removed = False
        async with ctx.bot.db.guild(ctx.guild).whitelist() as curr_list:
            if user_or_role.id in curr_list:
                removed = True
                curr_list.remove(user_or_role.id)
if removed:
if user:
await ctx.send(_("User has been removed from whitelist."))
else:
await ctx.send(_("Role has been removed from whitelist."))
else:
if user:
await ctx.send(_("User was not in the whitelist."))
else:
await ctx.send(_("Role was not in the whitelist."))
@localwhitelist.command(name="clear")
async def localwhitelist_clear(self, ctx: commands.Context):
"""
Clears the whitelist.
"""
await ctx.bot.db.guild(ctx.guild).whitelist.set([])
await ctx.send(_("Whitelist has been cleared."))
@commands.group()
@commands.guild_only()
@checks.admin_or_permissions(administrator=True)
async def localblacklist(self, ctx: commands.Context):
"""
        Blacklist management commands.
"""
pass
@localblacklist.command(name="add")
async def localblacklist_add(
self, ctx: commands.Context, *, user_or_role: Union[discord.Member, discord.Role]
):
"""
Adds a user or role to the blacklist.
"""
        user = isinstance(user_or_role, discord.Member)
        if user and await ctx.bot.is_owner(user_or_role):
            await ctx.send(_("You cannot blacklist an owner!"))
            return
        async with ctx.bot.db.guild(ctx.guild).blacklist() as curr_list:
            if user_or_role.id not in curr_list:
                curr_list.append(user_or_role.id)
if user:
await ctx.send(_("User added to blacklist."))
else:
await ctx.send(_("Role added to blacklist."))
@localblacklist.command(name="list")
async def localblacklist_list(self, ctx: commands.Context):
"""
Lists blacklisted users and roles.
"""
curr_list = await ctx.bot.db.guild(ctx.guild).blacklist()
msg = _("blacklisted Users and Roles:")
for obj in curr_list:
msg += "\n\t- {}".format(obj)
for page in pagify(msg):
await ctx.send(box(page))
@localblacklist.command(name="remove")
async def localblacklist_remove(
self, ctx: commands.Context, *, user_or_role: Union[discord.Member, discord.Role]
):
"""
Removes user or role from blacklist.
"""
        removed = False
        user = isinstance(user_or_role, discord.Member)
        async with ctx.bot.db.guild(ctx.guild).blacklist() as curr_list:
            if user_or_role.id in curr_list:
                removed = True
                curr_list.remove(user_or_role.id)
if removed:
if user:
await ctx.send(_("User has been removed from blacklist."))
else:
await ctx.send(_("Role has been removed from blacklist."))
else:
if user:
await ctx.send(_("User was not in the blacklist."))
else:
await ctx.send(_("Role was not in the blacklist."))
@localblacklist.command(name="clear")
async def localblacklist_clear(self, ctx: commands.Context):
"""
Clears the blacklist.
"""
await ctx.bot.db.guild(ctx.guild).blacklist.set([])
await ctx.send(_("blacklist has been cleared."))
@checks.guildowner_or_permissions(administrator=True)
@commands.group(name="command")
async def command_manager(self, ctx: commands.Context):
"""Manage the bot's commands."""
pass
@command_manager.group(name="disable", invoke_without_command=True)
async def command_disable(self, ctx: commands.Context, *, command: str):
"""Disable a command.
If you're the bot owner, this will disable commands
globally by default.
"""
# Select the scope based on the author's privileges
if await ctx.bot.is_owner(ctx.author):
await ctx.invoke(self.command_disable_global, command=command)
else:
await ctx.invoke(self.command_disable_guild, command=command)
@checks.is_owner()
@command_disable.command(name="global")
async def command_disable_global(self, ctx: commands.Context, *, command: str):
"""Disable a command globally."""
command_obj: commands.Command = ctx.bot.get_command(command)
if command_obj is None:
await ctx.send(
_("I couldn't find that command. Please note that it is case sensitive.")
)
return
async with ctx.bot.db.disabled_commands() as disabled_commands:
if command_obj.qualified_name not in disabled_commands:
disabled_commands.append(command_obj.qualified_name)
if not command_obj.enabled:
await ctx.send(_("That command is already disabled globally."))
return
command_obj.enabled = False
await ctx.tick()
@commands.guild_only()
@command_disable.command(name="server", aliases=["guild"])
async def command_disable_guild(self, ctx: commands.Context, *, command: str):
"""Disable a command in this server only."""
command_obj: commands.Command = ctx.bot.get_command(command)
if command_obj is None:
await ctx.send(
_("I couldn't find that command. Please note that it is case sensitive.")
)
return
async with ctx.bot.db.guild(ctx.guild).disabled_commands() as disabled_commands:
if command_obj.qualified_name not in disabled_commands:
disabled_commands.append(command_obj.qualified_name)
done = command_obj.disable_in(ctx.guild)
if not done:
await ctx.send(_("That command is already disabled in this server."))
else:
await ctx.tick()
@command_manager.group(name="enable", invoke_without_command=True)
async def command_enable(self, ctx: commands.Context, *, command: str):
"""Enable a command.
If you're a bot owner, this will try to enable a globally
disabled command by default.
"""
if await ctx.bot.is_owner(ctx.author):
await ctx.invoke(self.command_enable_global, command=command)
else:
await ctx.invoke(self.command_enable_guild, command=command)
@commands.is_owner()
@command_enable.command(name="global")
async def command_enable_global(self, ctx: commands.Context, *, command: str):
"""Enable a command globally."""
command_obj: commands.Command = ctx.bot.get_command(command)
if command_obj is None:
await ctx.send(
_("I couldn't find that command. Please note that it is case sensitive.")
)
return
async with ctx.bot.db.disabled_commands() as disabled_commands:
with contextlib.suppress(ValueError):
disabled_commands.remove(command_obj.qualified_name)
if command_obj.enabled:
await ctx.send(_("That command is already enabled globally."))
return
command_obj.enabled = True
await ctx.tick()
@commands.guild_only()
@command_enable.command(name="server", aliases=["guild"])
async def command_enable_guild(self, ctx: commands.Context, *, command: str):
"""Enable a command in this server."""
command_obj: commands.Command = ctx.bot.get_command(command)
if command_obj is None:
await ctx.send(
_("I couldn't find that command. Please note that it is case sensitive.")
)
return
async with ctx.bot.db.guild(ctx.guild).disabled_commands() as disabled_commands:
with contextlib.suppress(ValueError):
disabled_commands.remove(command_obj.qualified_name)
done = command_obj.enable_in(ctx.guild)
if not done:
await ctx.send(_("That command is already enabled in this server."))
else:
await ctx.tick()
@checks.is_owner()
@command_manager.command(name="disabledmsg")
async def command_disabledmsg(self, ctx: commands.Context, *, message: str = ""):
"""Set the bot's response to disabled commands.
Leave blank to send nothing.
To include the command name in the message, include the
`{command}` placeholder.
"""
await ctx.bot.db.disabled_command_msg.set(message)
await ctx.tick()
@commands.guild_only()
@checks.guildowner_or_permissions(manage_guild=True)
@commands.group(name="autoimmune")
async def autoimmune_group(self, ctx: commands.Context):
"""
Server settings for immunity from automated actions
"""
pass
@autoimmune_group.command(name="list")
async def autoimmune_list(self, ctx: commands.Context):
"""
Gets the current members and roles
configured for automatic moderation action immunity
"""
ai_ids = await ctx.bot.db.guild(ctx.guild).autoimmune_ids()
roles = {r.name for r in ctx.guild.roles if r.id in ai_ids}
members = {str(m) for m in ctx.guild.members if m.id in ai_ids}
output = ""
if roles:
output += _("Roles immune from automated moderation actions:\n")
output += ", ".join(roles)
if members:
if roles:
output += "\n"
output += _("Members immune from automated moderation actions:\n")
output += ", ".join(members)
if not output:
output = _("No immunty settings here.")
for page in pagify(output):
await ctx.send(page)
@autoimmune_group.command(name="add")
async def autoimmune_add(
self, ctx: commands.Context, *, user_or_role: Union[discord.Member, discord.Role]
):
"""
Makes a user or role immune from automated moderation actions
"""
async with ctx.bot.db.guild(ctx.guild).autoimmune_ids() as ai_ids:
if user_or_role.id in ai_ids:
return await ctx.send(_("Already added."))
ai_ids.append(user_or_role.id)
await ctx.tick()
@autoimmune_group.command(name="remove")
async def autoimmune_remove(
self, ctx: commands.Context, *, user_or_role: Union[discord.Member, discord.Role]
):
"""
Removes a user or role from being immune from automated moderation actions
"""
async with ctx.bot.db.guild(ctx.guild).autoimmune_ids() as ai_ids:
if user_or_role.id not in ai_ids:
return await ctx.send(_("Not in list."))
ai_ids.remove(user_or_role.id)
await ctx.tick()
@autoimmune_group.command(name="isimmune")
async def autoimmune_checkimmune(
self, ctx: commands.Context, *, user_or_role: Union[discord.Member, discord.Role]
):
"""
Checks if a user or role would be considered immune from automated actions
"""
if await ctx.bot.is_automod_immune(user_or_role):
await ctx.send(_("They are immune"))
else:
await ctx.send(_("They are not Immune"))
# RPC handlers
async def rpc_load(self, request):
cog_name = request.params[0]
spec = await self.bot.cog_mgr.find_cog(cog_name)
if spec is None:
raise LookupError("No such cog found.")
self._cleanup_and_refresh_modules(spec.name)
await self.bot.load_extension(spec)
async def rpc_unload(self, request):
cog_name = request.params[0]
self.bot.unload_extension(cog_name)
async def rpc_reload(self, request):
await self.rpc_unload(request)
await self.rpc_load(request) | AN-DiscordBot | /AN-DiscordBot-3.9.4.tar.gz/AN-DiscordBot-3.9.4/anbot/core/core_commands.py | core_commands.py |
import inspect
import logging
import os
import sys
import tempfile
from copy import deepcopy
from pathlib import Path
import appdirs
from discord.utils import deprecated
from . import commands
from .json_io import JsonIO
__all__ = [
"create_temp_config",
"load_basic_configuration",
"cog_data_path",
"core_data_path",
"load_bundled_data",
"bundled_data_path",
"storage_details",
"storage_type",
]
log = logging.getLogger("anbot.data_manager")
jsonio = None
basic_config = None
instance_name = None
basic_config_default = {"DATA_PATH": None, "COG_PATH_APPEND": "cogs", "CORE_PATH_APPEND": "core"}
config_dir = None
appdir = appdirs.AppDirs("AN-DiscordBot")
if sys.platform == "linux":
if 0 < os.getuid() < 1000:
config_dir = Path(appdir.site_data_dir)
if not config_dir:
config_dir = Path(appdir.user_config_dir)
config_file = config_dir / "config.json"
def create_temp_config():
"""
Creates a default instance for AN, so it can be run
without creating an instance.
.. warning:: The data of this instance will be removed
on next system restart.
"""
name = "temporary_red"
default_dirs = deepcopy(basic_config_default)
default_dirs["DATA_PATH"] = tempfile.mkdtemp()
default_dirs["STORAGE_TYPE"] = "JSON"
default_dirs["STORAGE_DETAILS"] = {}
config = JsonIO(config_file)._load_json()
config[name] = default_dirs
JsonIO(config_file)._save_json(config)
def load_basic_configuration(instance_name_: str):
"""Loads the basic bootstrap configuration necessary for `Config`
to know where to store or look for data.
.. important::
It is necessary to call this function BEFORE getting any `Config`
objects!
Parameters
----------
instance_name_ : str
The instance name given by CLI argument and created during
anbot setup.
"""
global jsonio
global basic_config
global instance_name
jsonio = JsonIO(config_file)
instance_name = instance_name_
try:
config = jsonio._load_json()
basic_config = config[instance_name]
except (FileNotFoundError, KeyError):
print(
"You need to configure the bot instance using `anbot-setup`"
" prior to running the bot."
)
sys.exit(1)
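# A minimal sketch of the expected bootstrap order. The instance name
# "main" and the cog name "Economy" below are hypothetical examples:
#
#     load_basic_configuration("main")
#     print(storage_type())                    # e.g. "JSON"
#     print(cog_data_path(raw_name="Economy"))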
def _base_data_path() -> Path:
if basic_config is None:
raise RuntimeError("You must load the basic config before you can get the base data path.")
path = basic_config["DATA_PATH"]
return Path(path).resolve()
def cog_data_path(cog_instance=None, raw_name: str = None) -> Path:
"""Gets the base cog data path. If you want to get the folder with
which to store your own cog's data please pass in an instance
of your cog class.
Either ``cog_instance`` or ``raw_name`` will be used, not both.
Parameters
----------
cog_instance
The instance of the cog you wish to get a data path for.
raw_name : str
The name of the cog to get a data path for.
Returns
-------
pathlib.Path
If ``cog_instance`` is provided it will return a path to a folder
dedicated to a given cog. Otherwise it will return a path to the
folder that contains data for all cogs.
"""
try:
base_data_path = Path(_base_data_path())
except RuntimeError as e:
raise RuntimeError(
"You must load the basic config before you can get the cog data path."
) from e
cog_path = base_data_path / basic_config["COG_PATH_APPEND"]
if raw_name is not None:
cog_path = cog_path / raw_name
elif cog_instance is not None:
cog_path = cog_path / cog_instance.__class__.__name__
cog_path.mkdir(exist_ok=True, parents=True)
return cog_path.resolve()
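# Example (sketch): from inside a cog, ``self`` is the cog instance, so a
# typical call looks like the following; the file name is hypothetical:
#
#     save_file = cog_data_path(self) / "saved.json"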
def core_data_path() -> Path:
try:
base_data_path = Path(_base_data_path())
except RuntimeError as e:
raise RuntimeError(
"You must load the basic config before you can get the core data path."
) from e
core_path = base_data_path / basic_config["CORE_PATH_APPEND"]
core_path.mkdir(exist_ok=True, parents=True)
return core_path.resolve()
# noinspection PyUnusedLocal
@deprecated("bundled_data_path() without calling this function")
def load_bundled_data(cog_instance, init_location: str):
pass
def bundled_data_path(cog_instance: commands.Cog) -> Path:
"""
Get the path to the "data" directory bundled with this cog.
The bundled data folder must be located alongside the ``.py`` file
which contains the cog class.
.. important::
You should *NEVER* write to this directory.
Parameters
----------
cog_instance
An instance of your cog. If calling from a command or method of
your cog, this should be ``self``.
Returns
-------
pathlib.Path
Path object to the bundled data folder.
Raises
------
FileNotFoundError
If no bundled data folder exists.
"""
bundled_path = Path(inspect.getfile(cog_instance.__class__)).parent / "data"
if not bundled_path.is_dir():
raise FileNotFoundError("No such directory {}".format(bundled_path))
return bundled_path
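# Example (sketch, from a command or method of your cog; "names.txt" is a
# hypothetical bundled file):
#
#     wordlist = bundled_data_path(self) / "names.txt"
#     words = wordlist.read_text(encoding="utf-8").splitlines()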
def storage_type() -> str:
"""Gets the storage type as a string.
Returns
-------
str
"""
try:
return basic_config["STORAGE_TYPE"]
except KeyError as e:
raise RuntimeError("Bot basic config has not been loaded yet.") from e
def storage_details() -> dict:
"""Gets any details necessary for config drivers to load.
These are set on setup.
Returns
-------
dict
"""
try:
return basic_config["STORAGE_DETAILS"]
except KeyError as e:
raise RuntimeError("Bot basic config has not been loaded yet.") from e | AN-DiscordBot | /AN-DiscordBot-3.9.4.tar.gz/AN-DiscordBot-3.9.4/anbot/core/data_manager.py | data_manager.py |
import contextlib
import sys
import codecs
import datetime
import logging
import traceback
from datetime import timedelta
from typing import List
import aiohttp
import discord
import pkg_resources
from colorama import Fore, Style, init
from pkg_resources import DistributionNotFound
from . import __version__ as an_version, version_info as an_version_info, VersionInfo, commands
from .data_manager import storage_type
from .utils.chat_formatting import inline, bordered, format_perms_list
from .utils import fuzzy_command_search, format_fuzzy_results
init()
# These module-level loggers are used by the event handlers below.
log = logging.getLogger("anbot")
sentry_log = logging.getLogger("anbot.sentry")
INTRO = """
______ _ _ ______ _
| _ (_) | | | ___ \ | |
| | | |_ ___ ___ ___ _ __ __| | | |_/ / ___ | |_
| | | | / __|/ __/ _ \| '__/ _` | | ___ \/ _ \| __|
| |/ /| \__ \ (_| (_) | | | (_| | | |_/ / (_) | |_
|___/ |_|___/\___\___/|_| \__,_| \____/ \___/ \__|
"""
def should_log_sentry(exception) -> bool:
e = exception
while e.__cause__ is not None:
e = e.__cause__
tb = e.__traceback__
tb_frame = None
while tb is not None:
tb_frame = tb.tb_frame
tb = tb.tb_next
module = tb_frame.f_globals.get("__name__")
return module is not None and module.startswith("anbot")
def init_events(bot, cli_flags):
@bot.event
async def on_connect():
if bot.uptime is None:
print("Connected to Discord. Getting ready...")
@bot.event
async def on_ready():
if bot.uptime is not None:
return
bot.uptime = datetime.datetime.utcnow()
packages = []
if cli_flags.no_cogs is False:
packages.extend(await bot.db.packages())
if cli_flags.load_cogs:
packages.extend(cli_flags.load_cogs)
if packages:
# Load permissions first, for security reasons
try:
packages.remove("permissions")
except ValueError:
pass
else:
packages.insert(0, "permissions")
to_remove = []
print("Loading packages...")
for package in packages:
try:
spec = await bot.cog_mgr.find_cog(package)
await bot.load_extension(spec)
except Exception as e:
log.exception("Failed to load package {}".format(package), exc_info=e)
await bot.remove_loaded_package(package)
to_remove.append(package)
for package in to_remove:
packages.remove(package)
if packages:
print("Loaded packages: " + ", ".join(packages))
if bot.rpc_enabled:
await bot.rpc.initialize()
guilds = len(bot.guilds)
users = len(set(bot.get_all_members()))
try:
data = await bot.application_info()
invite_url = discord.utils.oauth_url(data.id)
except Exception:
invite_url = "Could not fetch invite url"
prefixes = cli_flags.prefix or (await bot.db.prefix())
lang = await bot.db.locale()
red_pkg = pkg_resources.get_distribution("AN-DiscordBot")
dpy_version = discord.__version__
INFO = [
str(bot.user),
"Prefixes: {}".format(", ".join(prefixes)),
"Language: {}".format(lang),
"AN Bot Version: {}".format(an_version),
"Discord.py Version: {}".format(dpy_version),
"Shards: {}".format(bot.shard_count),
]
if guilds:
INFO.extend(("Servers: {}".format(guilds), "Users: {}".format(users)))
else:
print("Ready. I'm not in any server yet!")
INFO.append("{} cogs with {} commands".format(len(bot.cogs), len(bot.commands)))
with contextlib.suppress(aiohttp.ClientError, discord.HTTPException):
async with aiohttp.ClientSession() as session:
async with session.get("https://pypi.python.org/pypi/red-discordbot/json") as r:
data = await r.json()
if VersionInfo.from_str(data["info"]["version"]) > an_version_info:
INFO.append(
"Outdated version! {} is available "
"but you're using {}".format(data["info"]["version"], an_version)
)
owner = await bot.get_user_info(bot.owner_id)
await owner.send(
"Your AN instance is out of date! {} is the current "
"version, however you are using {}!".format(
data["info"]["version"], an_version
)
)
INFO2 = []
sentry = await bot.db.enable_sentry()
mongo_enabled = storage_type() != "JSON"
reqs_installed = {"docs": None, "test": None}
for key in reqs_installed.keys():
reqs = [x.name for x in red_pkg._dep_map[key]]
try:
pkg_resources.require(reqs)
except DistributionNotFound:
reqs_installed[key] = False
else:
reqs_installed[key] = True
options = (
("Error Reporting", sentry),
("MongoDB", mongo_enabled),
("Voice", True),
("Docs", reqs_installed["docs"]),
("Tests", reqs_installed["test"]),
)
on_symbol, off_symbol, ascii_border = _get_startup_screen_specs()
for option, enabled in options:
enabled = on_symbol if enabled else off_symbol
INFO2.append("{} {}".format(enabled, option))
print(Fore.RED + INTRO)
print(Style.RESET_ALL)
print(bordered(INFO, INFO2, ascii_border=ascii_border))
if invite_url:
print("\nInvite URL: {}\n".format(invite_url))
bot.color = discord.Colour(await bot.db.color())
@bot.event
async def on_error(event_method, *args, **kwargs):
sentry_log.exception("Exception in {}".format(event_method))
@bot.event
async def on_command_error(ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
await ctx.send_help()
elif isinstance(error, commands.ConversionFailure):
if error.args:
await ctx.send(error.args[0])
else:
await ctx.send_help()
elif isinstance(error, commands.BadArgument):
await ctx.send_help()
elif isinstance(error, commands.DisabledCommand):
disabled_message = await bot.db.disabled_command_msg()
if disabled_message:
await ctx.send(disabled_message.replace("{command}", ctx.invoked_with))
elif isinstance(error, commands.CommandInvokeError):
log.exception(
"Exception in command '{}'".format(ctx.command.qualified_name),
exc_info=error.original,
)
if should_log_sentry(error):
sentry_log.exception(
"Exception in command '{}'".format(ctx.command.qualified_name),
exc_info=error.original,
)
message = "Error in command '{}'. Check your console or logs for details.".format(
ctx.command.qualified_name
)
exception_log = "Exception in command '{}'\n" "".format(ctx.command.qualified_name)
exception_log += "".join(
traceback.format_exception(type(error), error, error.__traceback__)
)
bot._last_exception = exception_log
if not hasattr(ctx.cog, "_{0.command.cog_name}__error".format(ctx)):
await ctx.send(inline(message))
elif isinstance(error, commands.CommandNotFound):
fuzzy_commands = await fuzzy_command_search(ctx)
if not fuzzy_commands:
pass
elif await ctx.embed_requested():
await ctx.send(embed=await format_fuzzy_results(ctx, fuzzy_commands, embed=True))
else:
await ctx.send(await format_fuzzy_results(ctx, fuzzy_commands, embed=False))
elif isinstance(error, commands.BotMissingPermissions):
if bin(error.missing.value).count("1") == 1: # Only one perm missing
plural = ""
else:
plural = "s"
await ctx.send(
"I require the {perms} permission{plural} to execute that command.".format(
perms=format_perms_list(error.missing), plural=plural
)
)
elif isinstance(error, commands.CheckFailure):
pass
elif isinstance(error, commands.NoPrivateMessage):
await ctx.send("That command is not available in DMs.")
elif isinstance(error, commands.CommandOnCooldown):
await ctx.send(
"This command is on cooldown. Try again in {:.2f}s".format(error.retry_after)
)
else:
log.exception(type(error).__name__, exc_info=error)
try:
sentry_error = error.original
except AttributeError:
sentry_error = error
if should_log_sentry(sentry_error):
sentry_log.exception("Unhandled command error.", exc_info=sentry_error)
@bot.event
async def on_message(message):
bot.counter["messages_read"] += 1
await bot.process_commands(message)
discord_now = message.created_at
if (
not bot.checked_time_accuracy
or (discord_now - timedelta(minutes=60)) > bot.checked_time_accuracy
):
system_now = datetime.datetime.utcnow()
diff = abs((discord_now - system_now).total_seconds())
if diff > 60:
log.warning(
"Detected significant difference (%d seconds) in system clock to discord's "
"clock. Any time sensitive code may fail.",
diff,
)
bot.checked_time_accuracy = discord_now
@bot.event
async def on_resumed():
bot.counter["sessions_resumed"] += 1
@bot.event
async def on_command(command):
bot.counter["processed_commands"] += 1
@bot.event
async def on_command_add(command: commands.Command):
disabled_commands = await bot.db.disabled_commands()
if command.qualified_name in disabled_commands:
command.enabled = False
for guild in bot.guilds:
disabled_commands = await bot.db.guild(guild).disabled_commands()
if command.qualified_name in disabled_commands:
command.disable_in(guild)
async def _guild_added(guild: discord.Guild):
disabled_commands = await bot.db.guild(guild).disabled_commands()
for command_name in disabled_commands:
command_obj = bot.get_command(command_name)
if command_obj is not None:
command_obj.disable_in(guild)
@bot.event
async def on_guild_join(guild: discord.Guild):
await _guild_added(guild)
@bot.event
async def on_guild_available(guild: discord.Guild):
# We need to check guild-disabled commands here since some cogs
# are loaded prior to `on_ready`.
await _guild_added(guild)
@bot.event
async def on_guild_leave(guild: discord.Guild):
# Clean up any unneeded checks
disabled_commands = await bot.db.guild(guild).disabled_commands()
for command_name in disabled_commands:
command_obj = bot.get_command(command_name)
if command_obj is not None:
command_obj.enable_in(guild)
def _get_startup_screen_specs():
"""Get specs for displaying the startup screen on stdout.
This is so we don't get encoding errors when trying to print unicode
emojis to stdout (particularly with Windows Command Prompt).
Returns
-------
`tuple`
Tuple in the form (`str`, `str`, `bool`) containing (in order) the
on symbol, off symbol and whether or not the border should be pure ascii.
"""
encoder = codecs.getencoder(sys.stdout.encoding)
check_mark = "\N{SQUARE ROOT}"
try:
encoder(check_mark)
except UnicodeEncodeError:
on_symbol = "[X]"
off_symbol = "[ ]"
else:
on_symbol = check_mark
off_symbol = "X"
try:
encoder("┌┐└┘─│") # border symbols
except UnicodeEncodeError:
ascii_border = True
else:
ascii_border = False
return on_symbol, off_symbol, ascii_border | AN-DiscordBot | /AN-DiscordBot-3.9.4.tar.gz/AN-DiscordBot-3.9.4/anbot/core/events.py | events.py |
import functools
import json
import os
import asyncio
import logging
from copy import deepcopy
from uuid import uuid4
# This is basically our old DataIO and just a base for much more elaborate classes
# This still isn't completely threadsafe, (do not use config in threads)
from pathlib import Path
log = logging.getLogger("an")
PRETTY = {"indent": 4, "sort_keys": False, "separators": (",", " : ")}
MINIFIED = {"sort_keys": False, "separators": (",", ":")}
class JsonIO:
"""Basic functions for atomic saving / loading of json files"""
def __init__(self, path: Path = Path.cwd()):
"""
:param path: Full path to file.
"""
self._lock = asyncio.Lock()
self.path = path
# noinspection PyUnresolvedReferences
def _save_json(self, data, settings=PRETTY):
"""
The fsync calls here are entirely necessary.
On Windows, fsync is not available in its entirety.
If a Windows user ends up with tons of temp files, they should consider hosting on
something POSIX compatible, or using the mongo backend instead.
Most users won't encounter this issue, but with high write volumes,
without the fsync on both the temp file and on the directory after the replace,
there's no real durability or atomicity guarantee from the filesystem.
In depth overview of underlying reasons why this is needed:
https://lwn.net/Articles/457667/
Also see:
http://man7.org/linux/man-pages/man2/open.2.html#NOTES (synchronous I/O section)
And:
https://www.mjmwired.net/kernel/Documentation/filesystems/ext4.txt#310
"""
log.debug("Saving file {}".format(self.path))
filename = self.path.stem
tmp_file = "{}-{}.tmp".format(filename, uuid4().fields[0])
tmp_path = self.path.parent / tmp_file
with tmp_path.open(encoding="utf-8", mode="w") as f:
json.dump(data, f, **settings)
f.flush() # This does get closed on context exit, ...
os.fsync(f.fileno()) # but that needs to happen prior to this line
tmp_path.replace(self.path)
# pylint: disable=E1101
try:
fd = os.open(self.path.parent, os.O_DIRECTORY)
os.fsync(fd)
except AttributeError:
fd = None
finally:
if fd is not None:
os.close(fd)
async def _threadsafe_save_json(self, data, settings=PRETTY):
loop = asyncio.get_event_loop()
# the deepcopy is needed here. otherwise,
# the dict can change during serialization
# and this will break the encoder.
data_copy = deepcopy(data)
func = functools.partial(self._save_json, data_copy, settings)
async with self._lock:
await loop.run_in_executor(None, func)
# noinspection PyUnresolvedReferences
def _load_json(self):
log.debug("Reading file {}".format(self.path))
with self.path.open(encoding="utf-8", mode="r") as f:
data = json.load(f)
return data
async def _threadsafe_load_json(self):
# _load_json takes no arguments besides self, so no partial is needed here
loop = asyncio.get_event_loop()
async with self._lock:
return await loop.run_in_executor(None, self._load_json)
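# Example (sketch): a save/load round trip with this class. The file name is
# hypothetical, and the _threadsafe_* variants must be awaited from a
# running event loop:
#
#     jio = JsonIO(Path("settings.json"))
#     jio._save_json({"counter": 1})
#     assert jio._load_json() == {"counter": 1}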
import os
import re
from pathlib import Path
from typing import Callable, Union
__all__ = ["get_locale", "set_locale", "reload_locales", "cog_i18n", "Translator"]
_current_locale = "en_us"
WAITING_FOR_MSGID = 1
IN_MSGID = 2
WAITING_FOR_MSGSTR = 3
IN_MSGSTR = 4
MSGID = 'msgid "'
MSGSTR = 'msgstr "'
_translators = []
def get_locale():
return _current_locale
def set_locale(locale):
global _current_locale
_current_locale = locale
reload_locales()
def reload_locales():
for translator in _translators:
translator.load_translations()
def _parse(translation_file):
"""
Custom gettext parsing of translation files. All credit for this code goes
to ProgVal/Valentin Lorentz and the Limnoria project.
https://github.com/ProgVal/Limnoria/blob/master/src/i18n.py
:param translation_file:
An open file-like object containing translations.
:return:
A set of 2-tuples containing the original string and the translated version.
"""
step = WAITING_FOR_MSGID
translations = set()
for line in translation_file:
line = line[0:-1] # Remove the ending \n
if line.startswith(MSGID):
# Don't check if step is WAITING_FOR_MSGID
untranslated = ""
translated = ""
data = line[len(MSGID) : -1]
if len(data) == 0: # Multiline mode
step = IN_MSGID
else:
untranslated += data
step = WAITING_FOR_MSGSTR
elif step is IN_MSGID and line.startswith('"') and line.endswith('"'):
untranslated += line[1:-1]
elif step is IN_MSGID and untranslated == "": # Empty MSGID
step = WAITING_FOR_MSGID
elif step is IN_MSGID: # the MSGID is finished
step = WAITING_FOR_MSGSTR
if step is WAITING_FOR_MSGSTR and line.startswith(MSGSTR):
data = line[len(MSGSTR) : -1]
if len(data) == 0: # Multiline mode
step = IN_MSGSTR
else:
translations |= {(untranslated, data)}
step = WAITING_FOR_MSGID
elif step is IN_MSGSTR and line.startswith('"') and line.endswith('"'):
translated += line[1:-1]
elif step is IN_MSGSTR: # the MSGSTR is finished
step = WAITING_FOR_MSGID
if translated == "":
translated = untranslated
translations |= {(untranslated, translated)}
if step is IN_MSGSTR:
if translated == "":
translated = untranslated
translations |= {(untranslated, translated)}
return translations
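# For reference, a minimal sketch of the .po entries this state machine
# consumes; the translation shown is a hypothetical example:
#
#     msgid "Hello, world!"
#     msgstr "Bonjour, le monde !"
#
# Multiline entries start with an empty msgid or msgstr and continue on the
# following quoted lines.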
def _normalize(string, remove_newline=False):
"""
String normalization.
All credit for this code goes
to ProgVal/Valentin Lorentz and the Limnoria project.
https://github.com/ProgVal/Limnoria/blob/master/src/i18n.py
:param string:
:param remove_newline:
:return:
"""
def normalize_whitespace(s):
"""Normalizes the whitespace in a string; \s+ becomes one space."""
if not s:
return str(s) # not the same reference
starts_with_space = s[0] in " \n\t\r"
ends_with_space = s[-1] in " \n\t\r"
if remove_newline:
newline_re = re.compile("[\r\n]+")
s = " ".join(filter(None, newline_re.split(s)))
s = " ".join(filter(None, s.split("\t")))
s = " ".join(filter(None, s.split(" ")))
if starts_with_space:
s = " " + s
if ends_with_space:
s += " "
return s
if string is None:
return ""
string = string.replace("\\n\\n", "\n\n")
string = string.replace("\\n", " ")
string = string.replace('\\"', '"')
string = string.replace("'", "'")
string = normalize_whitespace(string)
string = string.strip("\n")
string = string.strip("\t")
return string
def get_locale_path(cog_folder: Path, extension: str) -> Path:
"""
Gets the folder path containing localization files.
:param Path cog_folder:
The cog folder that we want localizations for.
:param str extension:
Extension of localization files.
:return:
Path of possible localization file, it may not exist.
"""
return cog_folder / "locales" / "{}.{}".format(get_locale(), extension)
class Translator(Callable[[str], str]):
"""Function to get translated strings at runtime."""
def __init__(self, name: str, file_location: Union[str, Path, os.PathLike]):
"""
Initializes an internationalization object.
Parameters
----------
name : str
Your cog name.
file_location : `str` or `pathlib.Path`
This should always be ``__file__`` otherwise your localizations
will not load.
"""
self.cog_folder = Path(file_location).resolve().parent
self.cog_name = name
self.translations = {}
_translators.append(self)
self.load_translations()
def __call__(self, untranslated: str) -> str:
"""Translate the given string.
This will look for the string in the translator's :code:`.pot` file,
with respect to the current locale.
"""
normalized_untranslated = _normalize(untranslated, True)
try:
return self.translations[normalized_untranslated]
except KeyError:
return untranslated
def load_translations(self):
"""
Loads the current translations.
"""
self.translations = {}
translation_file = None
locale_path = get_locale_path(self.cog_folder, "po")
try:
# "ru" is not a valid mode in Python 3 and always raised ValueError,
# so open directly in text read mode.
translation_file = locale_path.open("r", encoding="utf-8")
self._parse(translation_file)
except (IOError, FileNotFoundError): # The translation is unavailable
pass
finally:
if translation_file is not None:
translation_file.close()
def _parse(self, translation_file):
self.translations = {}
for translation in _parse(translation_file):
self._add_translation(*translation)
def _add_translation(self, untranslated, translated):
untranslated = _normalize(untranslated, True)
translated = _normalize(translated)
if translated:
self.translations.update({untranslated: translated})
# This import to be down here to avoid circular import issues.
# This will be cleaned up at a later date
# noinspection PyPep8
from . import commands
def cog_i18n(translator: Translator):
"""Get a class decorator to link the translator to this cog."""
def decorator(cog_class: type):
cog_class.__translator__ = translator
for name, attr in cog_class.__dict__.items():
if isinstance(attr, (commands.Group, commands.Command)):
attr.translator = translator
setattr(cog_class, name, attr)
return cog_class
return decorator | AN-DiscordBot | /AN-DiscordBot-3.9.4.tar.gz/AN-DiscordBot-3.9.4/anbot/core/i18n.py | i18n.py |
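# Example (sketch): wiring a cog up for translation. "MyCog" and its command
# are hypothetical; ``__file__`` must be the cog's own file so the locales
# folder can be located next to it:
#
#     _ = Translator("MyCog", __file__)
#
#     @cog_i18n(_)
#     class MyCog(commands.Cog):
#         @commands.command()
#         async def hello(self, ctx):
#             await ctx.send(_("Hello, world!"))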
import asyncio
import inspect
import os
import logging
from collections import Counter
from enum import Enum
from importlib.machinery import ModuleSpec
from pathlib import Path
from typing import Optional, Union, List
import discord
import sys
from discord.ext.commands import when_mentioned_or
from . import Config, i18n, commands, errors
from .cog_manager import CogManager
from .help_formatter import Help, help as help_
from .rpc import RPCMixin
from .sentry import SentryManager
from .utils import common_filters
def _is_submodule(parent, child):
return parent == child or child.startswith(parent + ".")
class ANBase(commands.GroupMixin, commands.bot.BotBase, RPCMixin):
"""Mixin for the main bot class.
This exists because `AN` inherits from `discord.AutoShardedClient`, which
is something other bot classes (namely selfbots) may not want to have as
a parent class.
Selfbots should inherit from this mixin along with `discord.Client`.
"""
def __init__(self, *args, cli_flags=None, bot_dir: Path = Path.cwd(), **kwargs):
self._shutdown_mode = ExitCodes.CRITICAL
self.db = Config.get_core_conf(force_registration=True)
self._co_owners = cli_flags.co_owner
self.rpc_enabled = cli_flags.rpc
self._last_exception = None
self.db.register_global(
token=None,
prefix=[],
packages=[],
owner=None,
whitelist=[],
blacklist=[],
enable_sentry=None,
locale="en",
embeds=True,
color=15158332,
fuzzy=False,
help__page_char_limit=1000,
help__max_pages_in_guild=2,
help__tagline="",
disabled_commands=[],
disabled_command_msg="That command is disabled.",
)
self.db.register_guild(
prefix=[],
whitelist=[],
blacklist=[],
admin_role=None,
mod_role=None,
embeds=None,
use_bot_color=False,
fuzzy=False,
disabled_commands=[],
autoimmune_ids=[],
)
self.db.register_user(embeds=None)
async def prefix_manager(bot, message):
if not cli_flags.prefix:
global_prefix = await bot.db.prefix()
else:
global_prefix = cli_flags.prefix
if message.guild is None:
return global_prefix
server_prefix = await bot.db.guild(message.guild).prefix()
if cli_flags.mentionable:
return (
when_mentioned_or(*server_prefix)(bot, message)
if server_prefix
else when_mentioned_or(*global_prefix)(bot, message)
)
else:
return server_prefix if server_prefix else global_prefix
if "command_prefix" not in kwargs:
kwargs["command_prefix"] = prefix_manager
if cli_flags.owner and "owner_id" not in kwargs:
kwargs["owner_id"] = cli_flags.owner
if "owner_id" not in kwargs:
loop = asyncio.get_event_loop()
loop.run_until_complete(self._dict_abuse(kwargs))
if "command_not_found" not in kwargs:
kwargs["command_not_found"] = "Command {} not found.\n{}"
self.counter = Counter()
self.uptime = None
self.checked_time_accuracy = None
self.color = discord.Embed.Empty # This is needed or color ends up 0x000000
self.main_dir = bot_dir
self.cog_mgr = CogManager()
super().__init__(*args, formatter=Help(), **kwargs)
self.remove_command("help")
self.add_command(help_)
self._sentry_mgr = None
self._permissions_hooks: List[commands.CheckPredicate] = []
def enable_sentry(self):
"""Enable Sentry logging for AN."""
if self._sentry_mgr is None:
sentry_log = logging.getLogger("anbot.sentry")
sentry_log.setLevel(logging.WARNING)
self._sentry_mgr = SentryManager(sentry_log)
self._sentry_mgr.enable()
def disable_sentry(self):
"""Disable Sentry logging for AN."""
if self._sentry_mgr is None:
return
self._sentry_mgr.disable()
async def _dict_abuse(self, indict):
"""
Please blame <@269933075037814786> for this.
:param indict:
:return:
"""
indict["owner_id"] = await self.db.owner()
i18n.set_locale(await self.db.locale())
async def embed_requested(self, channel, user, command=None) -> bool:
"""
Determine if an embed is requested for a response.
Parameters
----------
channel : `discord.abc.GuildChannel` or `discord.abc.PrivateChannel`
The channel to check embed settings for.
user : `discord.abc.User`
The user to check embed settings for.
command
(Optional) the command ran.
Returns
-------
bool
:code:`True` if an embed is requested
"""
if isinstance(channel, discord.abc.PrivateChannel) or (
command and command == self.get_command("help")
):
user_setting = await self.db.user(user).embeds()
if user_setting is not None:
return user_setting
else:
guild_setting = await self.db.guild(channel.guild).embeds()
if guild_setting is not None:
return guild_setting
global_setting = await self.db.embeds()
return global_setting
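# Example (sketch, from inside a command; ``my_embed`` and ``plain_text``
# are hypothetical stand-ins):
#
#     if await ctx.bot.embed_requested(ctx.channel, ctx.author, command=ctx.command):
#         await ctx.send(embed=my_embed)
#     else:
#         await ctx.send(plain_text)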
async def is_owner(self, user):
if user.id in self._co_owners:
return True
return await super().is_owner(user)
async def is_admin(self, member: discord.Member):
"""Checks if a member is an admin of their guild."""
admin_role = await self.db.guild(member.guild).admin_role()
try:
if any(role.id == admin_role for role in member.roles):
return True
except AttributeError: # someone passed a webhook to this
pass
return False
async def is_mod(self, member: discord.Member):
"""Checks if a member is a mod or admin of their guild."""
mod_role = await self.db.guild(member.guild).mod_role()
admin_role = await self.db.guild(member.guild).admin_role()
try:
if any(role.id in (mod_role, admin_role) for role in member.roles):
return True
except AttributeError: # someone passed a webhook to this
pass
return False
async def get_context(self, message, *, cls=commands.Context):
return await super().get_context(message, cls=cls)
@staticmethod
def list_packages():
"""Lists packages present in the cogs the folder"""
return os.listdir("cogs")
async def save_packages_status(self, packages):
await self.db.packages.set(packages)
async def add_loaded_package(self, pkg_name: str):
async with self.db.packages() as curr_pkgs:
if pkg_name not in curr_pkgs:
curr_pkgs.append(pkg_name)
async def remove_loaded_package(self, pkg_name: str):
async with self.db.packages() as curr_pkgs:
while pkg_name in curr_pkgs:
curr_pkgs.remove(pkg_name)
async def load_extension(self, spec: ModuleSpec):
name = spec.name.split(".")[-1]
if name in self.extensions:
raise errors.PackageAlreadyLoaded(spec)
lib = spec.loader.load_module()
if not hasattr(lib, "setup"):
del lib
raise discord.ClientException(f"extension {name} does not have a setup function")
if asyncio.iscoroutinefunction(lib.setup):
await lib.setup(self)
else:
lib.setup(self)
self.extensions[name] = lib
def remove_cog(self, cogname: str):
cog = self.get_cog(cogname)
if cog is None:
return
for cls in inspect.getmro(cog.__class__):
try:
hook = getattr(cog, f"_{cls.__name__}__permissions_hook")
except AttributeError:
pass
else:
self.remove_permissions_hook(hook)
super().remove_cog(cogname)
for meth in self.rpc_handlers.pop(cogname.upper(), ()):
self.unregister_rpc_handler(meth)
def unload_extension(self, name):
lib = self.extensions.get(name)
if lib is None:
return
lib_name = lib.__name__ # Thank you
# find all references to the module
# remove the cogs registered from the module
for cogname, cog in self.cogs.copy().items():
if cog.__module__ and _is_submodule(lib_name, cog.__module__):
self.remove_cog(cogname)
# first remove all the commands from the module
for cmd in self.all_commands.copy().values():
if cmd.module and _is_submodule(lib_name, cmd.module):
if isinstance(cmd, discord.ext.commands.GroupMixin):
cmd.recursively_remove_all_commands()
self.remove_command(cmd.name)
# then remove all the listeners from the module
for event_list in self.extra_events.copy().values():
remove = []
for index, event in enumerate(event_list):
if event.__module__ and _is_submodule(lib_name, event.__module__):
remove.append(index)
for index in reversed(remove):
del event_list[index]
try:
func = getattr(lib, "teardown")
except AttributeError:
pass
else:
try:
func(self)
except Exception:
pass
finally:
# finally remove the import..
pkg_name = lib.__package__
del lib
del self.extensions[name]
for module in list(sys.modules):
if _is_submodule(lib_name, module):
del sys.modules[module]
if pkg_name.startswith("anbot.cogs."):
del sys.modules["anbot.cogs"].__dict__[name]
async def is_automod_immune(
self, to_check: Union[discord.Message, commands.Context, discord.abc.User, discord.Role]
) -> bool:
"""
Checks if the user, message, context, or role should be considered immune from automated
moderation actions.
This will return ``False`` in direct messages.
Parameters
----------
to_check : `discord.Message` or `commands.Context` or `discord.abc.User` or `discord.Role`
Something to check if it would be immune
Returns
-------
bool
``True`` if immune
"""
guild = to_check.guild
if not guild:
return False
if isinstance(to_check, discord.Role):
ids_to_check = [to_check.id]
else:
author = getattr(to_check, "author", to_check)
try:
ids_to_check = [r.id for r in author.roles]
except AttributeError:
# webhook messages are a user not member,
# cheaper than isinstance
return True # webhooks require significant permissions to enable.
else:
ids_to_check.append(author.id)
immune_ids = await self.db.guild(guild).autoimmune_ids()
return any(i in immune_ids for i in ids_to_check)
@staticmethod
async def send_filtered(
destination: discord.abc.Messageable,
filter_mass_mentions=True,
filter_invite_links=True,
filter_all_links=False,
**kwargs,
):
"""
This is a convenience wrapper around
discord.abc.Messageable.send
It takes the destination you'd like to send to, which filters to apply
(defaults on mass mentions and invite links) and any other parameters
normally accepted by destination.send
This should realistically only be used for responding using user-provided
input (unfortunately, including usernames).
Manually crafted messages which don't take any user input have no need of this.
"""
content = kwargs.pop("content", None)
if content:
if filter_mass_mentions:
content = common_filters.filter_mass_mentions(content)
if filter_invite_links:
content = common_filters.filter_invites(content)
if filter_all_links:
content = common_filters.filter_urls(content)
await destination.send(content=content, **kwargs)
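# Example (sketch): echoing untrusted input with the default filters;
# ``user_provided_text`` is a hypothetical variable:
#
#     await bot.send_filtered(ctx.channel, content=user_provided_text)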
def add_cog(self, cog: commands.Cog):
if not isinstance(cog, commands.Cog):
raise RuntimeError(
f"The {cog.__class__.__name__} cog in the {cog.__module__} package does "
f"not inherit from the commands.Cog base class. The cog author must update "
f"the cog to adhere to this requirement."
)
if not hasattr(cog, "requires"):
commands.Cog.__init__(cog)
for cls in inspect.getmro(cog.__class__):
try:
hook = getattr(cog, f"_{cls.__name__}__permissions_hook")
except AttributeError:
pass
else:
self.add_permissions_hook(hook)
for attr in dir(cog):
_attr = getattr(cog, attr)
if isinstance(_attr, discord.ext.commands.Command) and not isinstance(
_attr, commands.Command
):
raise RuntimeError(
f"The {cog.__class__.__name__} cog in the {cog.__module__} package,"
" is not using AN's command module, and cannot be added. "
"If this is your cog, please use `from anbot.core import commands`"
"in place of `from discord.ext import commands`. For more details on "
"this requirement, see this page: "
"http://red-discordbot.readthedocs.io/en/v3-develop/framework_commands.html"
)
super().add_cog(cog)
self.dispatch("cog_add", cog)
def add_command(self, command: commands.Command):
if not isinstance(command, commands.Command):
raise TypeError("Command objects must derive from anbot.core.commands.Command")
super().add_command(command)
self.dispatch("command_add", command)
def clear_permission_rules(self, guild_id: Optional[int]) -> None:
"""Clear all permission overrides in a scope.
Parameters
----------
guild_id : Optional[int]
The guild ID to wipe permission overrides for. If
``None``, this will clear all global rules and leave all
guild rules untouched.
"""
for cog in self.cogs.values():
cog.requires.clear_all_rules(guild_id)
for command in self.walk_commands():
command.requires.clear_all_rules(guild_id)
def add_permissions_hook(self, hook: commands.CheckPredicate) -> None:
"""Add a permissions hook.
Permissions hooks are check predicates which are called before
calling `Requires.verify`, and they can optionally return an
override: ``True`` to allow, ``False`` to deny, and ``None`` to
default to normal behaviour.
Parameters
----------
hook
A command check predicate which returns ``True``, ``False``
or ``None``.
"""
self._permissions_hooks.append(hook)
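# Example (sketch): a hook that denies a hypothetical user ID and otherwise
# defers to normal permission checking by returning None:
#
#     async def deny_one_user(ctx):
#         if ctx.author.id == 123456789012345678:  # hypothetical ID
#             return False
#         return None
#
#     bot.add_permissions_hook(deny_one_user)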
def remove_permissions_hook(self, hook: commands.CheckPredicate) -> None:
"""Remove a permissions hook.
Parameters are the same as those in `add_permissions_hook`.
Raises
------
ValueError
If the permissions hook has not been added.
"""
self._permissions_hooks.remove(hook)
async def verify_permissions_hooks(self, ctx: commands.Context) -> Optional[bool]:
"""Run permissions hooks.
Parameters
----------
ctx : commands.Context
The context for the command being invoked.
Returns
-------
Optional[bool]
``False`` if any hooks returned ``False``, ``True`` if any
hooks return ``True`` and none returned ``False``, ``None``
otherwise.
"""
hook_results = []
for hook in self._permissions_hooks:
result = await discord.utils.maybe_coroutine(hook, ctx)
if result is not None:
hook_results.append(result)
if hook_results:
if all(hook_results):
ctx.permission_state = commands.PermState.ALLOWED_BY_HOOK
return True
else:
ctx.permission_state = commands.PermState.DENIED_BY_HOOK
return False
class AN(ANBase, discord.AutoShardedClient):
"""
You're welcome Caleb.
"""
async def logout(self):
"""Logs out of Discord and closes all connections."""
if self._sentry_mgr:
await self._sentry_mgr.close()
await super().logout()
async def shutdown(self, *, restart: bool = False):
"""Gracefully quit AN.
The program will exit with code :code:`0` by default.
Parameters
----------
restart : bool
If :code:`True`, the program will exit with code :code:`26`. If the
launcher sees this, it will attempt to restart the bot.
"""
if not restart:
self._shutdown_mode = ExitCodes.SHUTDOWN
else:
self._shutdown_mode = ExitCodes.RESTART
await self.logout()
class ExitCodes(Enum):
CRITICAL = 1
SHUTDOWN = 0
RESTART = 26 | AN-DiscordBot | /AN-DiscordBot-3.9.4.tar.gz/AN-DiscordBot-3.9.4/anbot/core/bot.py | bot.py |
import argparse
import asyncio
from anbot.core.bot import AN
def confirm(m=""):
return input(m).lower().strip() in ("y", "yes")
def interactive_config(red, token_set, prefix_set):
loop = asyncio.get_event_loop()
token = ""
print("AN - Discord Bot | Configuration process\n")
if not token_set:
print("Please enter a valid token:")
while not token:
token = input("> ")
if len(token) < 50:
print("That doesn't look like a valid token.")
token = ""
if token:
loop.run_until_complete(red.db.token.set(token))
if not prefix_set:
prefix = ""
print(
"\nPick a prefix. A prefix is what you type before a "
"command. Example:\n"
"!help\n^ The exclamation mark is the prefix in this case.\n"
"The prefix can be multiple characters. You will be able to change it "
"later and add more of them.\nChoose your prefix:\n"
)
while not prefix:
prefix = input("Prefix> ")
if len(prefix) > 10:
print("Your prefix seems overly long. Are you sure that it's correct? (y/n)")
if not confirm("> "):
prefix = ""
if prefix:
loop.run_until_complete(red.db.prefix.set([prefix]))
ask_sentry(red)
return token
def ask_sentry(red: AN):
loop = asyncio.get_event_loop()
print(
"\nThank you for installing AN V3! AN is constantly undergoing\n"
" improvements, and we would like to ask if you are comfortable with\n"
" the bot automatically submitting fatal error logs to the development\n"
' team. If you wish to opt into the process please type "yes":\n'
)
if not confirm("> "):
loop.run_until_complete(red.db.enable_sentry.set(False))
else:
loop.run_until_complete(red.db.enable_sentry.set(True))
print("\nThank you for helping us with the development process!")
def parse_cli_flags(args):
parser = argparse.ArgumentParser(
description="AN - Discord Bot", usage="anbot <instance_name> [arguments]"
)
parser.add_argument("--version", "-V", action="store_true", help="Show AN's current version")
parser.add_argument(
"--list-instances",
action="store_true",
help="List all instance names setup with 'anbot-setup'",
)
parser.add_argument(
"--owner",
type=int,
help="ID of the owner. Only who hosts "
"AN should be owner, this has "
"serious security implications if misused.",
)
parser.add_argument(
"--co-owner",
type=int,
default=[],
nargs="*",
help="ID of a co-owner. Only people who have access "
"to the system that is hosting AN should be "
"co-owners, as this gives them complete access "
"to the system's data. This has serious "
"security implications if misused. Can be "
"multiple.",
)
parser.add_argument("--prefix", "-p", action="append", help="Global prefix. Can be multiple")
parser.add_argument(
"--no-prompt",
action="store_true",
help="Disables console inputs. Features requiring "
"console interaction could be disabled as a "
"result",
)
parser.add_argument(
"--no-cogs", action="store_true", help="Starts AN with no cogs loaded, only core"
)
parser.add_argument(
"--load-cogs",
type=str,
nargs="*",
help="Force loading specified cogs from the installed packages. "
"Can be used with the --no-cogs flag to load these cogs exclusively.",
)
parser.add_argument(
"--dry-run",
action="store_true",
help="Makes AN quit with code 0 just before the "
"login. This is useful for testing the boot "
"process.",
)
parser.add_argument("--debug", action="store_true", help="Sets the loggers level as debug")
parser.add_argument("--dev", action="store_true", help="Enables developer mode")
parser.add_argument(
"--mentionable",
action="store_true",
help="Allows mentioning the bot as an alternative to using the bot prefix",
)
parser.add_argument(
"--rpc",
action="store_true",
help="Enables the built-in RPC server. Please read the docs prior to enabling this!",
)
parser.add_argument("--token", type=str, help="Run AN with the given token.")
parser.add_argument(
"--no-instance",
action="store_true",
help=(
"Run AN without any existing instance. "
"The data will be saved under a temporary folder "
"and deleted on next system restart."
),
)
parser.add_argument(
"instance_name", nargs="?", help="Name of the bot instance created during `anbot-setup`."
)
args = parser.parse_args(args)
if args.prefix:
args.prefix = sorted(args.prefix, reverse=True)
else:
args.prefix = []
return args | AN-DiscordBot | /AN-DiscordBot-3.9.4.tar.gz/AN-DiscordBot-3.9.4/anbot/core/cli.py | cli.py |
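# Example (sketch): parsing launcher-style arguments; the instance name and
# prefix are hypothetical:
#
#     flags = parse_cli_flags(["myinstance", "--prefix", "!", "--dev"])
#     assert flags.instance_name == "myinstance"
#     assert flags.prefix == ["!"]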
import re
from typing import Match, Pattern
from urllib.parse import quote_plus
import motor.core
import motor.motor_asyncio
from .red_base import BaseDriver
__all__ = ["Mongo"]
_conn = None
def _initialize(**kwargs):
uri = kwargs.get("URI", "mongodb")
host = kwargs["HOST"]
port = kwargs["PORT"]
admin_user = kwargs["USERNAME"]
admin_pass = kwargs["PASSWORD"]
db_name = kwargs.get("DB_NAME", "default_db")
if port == 0:
ports = ""
else:
ports = ":{}".format(port)
if admin_user is not None and admin_pass is not None:
url = "{}://{}:{}@{}{}/{}".format(
uri, quote_plus(admin_user), quote_plus(admin_pass), host, ports, db_name
)
else:
url = "{}://{}{}/{}".format(uri, host, ports, db_name)
global _conn
_conn = motor.motor_asyncio.AsyncIOMotorClient(url)
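# For reference (sketch): with hypothetical details,
# _initialize(URI="mongodb", HOST="localhost", PORT=27017, USERNAME="an",
# PASSWORD="hunter2", DB_NAME="an") builds the connection string
# "mongodb://an:hunter2@localhost:27017/an" before handing it to Motor.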
class Mongo(BaseDriver):
"""
Subclass of :py:class:`.red_base.BaseDriver`.
"""
def __init__(self, cog_name, identifier, **kwargs):
super().__init__(cog_name, identifier)
if _conn is None:
_initialize(**kwargs)
@property
def db(self) -> motor.core.Database:
"""
Gets the mongo database for this cog's name.
.. warning::
Right now this will cause a new connection to be made every time the
database is accessed. We will want to create a connection pool down the
line to limit the number of connections.
:return:
Motor database object.
"""
return _conn.get_database()
def get_collection(self) -> motor.core.Collection:
"""
Gets this cog's collection within the Motor database.
The collection is named after the cog.
:return:
Motor collection object.
"""
return self.db[self.cog_name]
@staticmethod
def _parse_identifiers(identifiers):
uuid, identifiers = identifiers[0], identifiers[1:]
return uuid, identifiers
async def get(self, *identifiers: str):
mongo_collection = self.get_collection()
identifiers = (*map(self._escape_key, identifiers),)
dot_identifiers = ".".join(identifiers)
partial = await mongo_collection.find_one(
filter={"_id": self.unique_cog_identifier}, projection={dot_identifiers: True}
)
if partial is None:
raise KeyError("No matching document was found and Config expects a KeyError.")
for i in identifiers:
partial = partial[i]
if isinstance(partial, dict):
return self._unescape_dict_keys(partial)
return partial
async def set(self, *identifiers: str, value=None):
dot_identifiers = ".".join(map(self._escape_key, identifiers))
if isinstance(value, dict):
value = self._escape_dict_keys(value)
mongo_collection = self.get_collection()
await mongo_collection.update_one(
{"_id": self.unique_cog_identifier},
update={"$set": {dot_identifiers: value}},
upsert=True,
)
async def clear(self, *identifiers: str):
dot_identifiers = ".".join(map(self._escape_key, identifiers))
mongo_collection = self.get_collection()
if len(identifiers) > 0:
await mongo_collection.update_one(
{"_id": self.unique_cog_identifier}, update={"$unset": {dot_identifiers: 1}}
)
else:
await mongo_collection.delete_one({"_id": self.unique_cog_identifier})
@staticmethod
def _escape_key(key: str) -> str:
return _SPECIAL_CHAR_PATTERN.sub(_replace_with_escaped, key)
@staticmethod
def _unescape_key(key: str) -> str:
return _CHAR_ESCAPE_PATTERN.sub(_replace_with_unescaped, key)
@classmethod
def _escape_dict_keys(cls, data: dict) -> dict:
"""Recursively escape all keys in a dict."""
ret = {}
for key, value in data.items():
key = cls._escape_key(key)
if isinstance(value, dict):
value = cls._escape_dict_keys(value)
ret[key] = value
return ret
@classmethod
def _unescape_dict_keys(cls, data: dict) -> dict:
"""Recursively unescape all keys in a dict."""
ret = {}
for key, value in data.items():
key = cls._unescape_key(key)
if isinstance(value, dict):
value = cls._unescape_dict_keys(value)
ret[key] = value
return ret
_SPECIAL_CHAR_PATTERN: Pattern[str] = re.compile(r"([.$]|\\U0000002E|\\U00000024)")
_SPECIAL_CHARS = {
".": "\\U0000002E",
"$": "\\U00000024",
"\\U0000002E": "\\U&0000002E",
"\\U00000024": "\\U&00000024",
}
def _replace_with_escaped(match: Match[str]) -> str:
return _SPECIAL_CHARS[match[0]]
_CHAR_ESCAPE_PATTERN: Pattern[str] = re.compile(r"(\\U0000002E|\\U00000024)")
_CHAR_ESCAPES = {
"\\U0000002E": ".",
"\\U00000024": "$",
"\\U&0000002E": "\\U0000002E",
"\\U&00000024": "\\U00000024",
}
def _replace_with_unescaped(match: Match[str]) -> str:
return _CHAR_ESCAPES[match[0]]
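# Example (sketch): the escape helpers round-trip keys containing Mongo's
# special characters:
#
#     escaped = Mongo._escape_key("a.b$c")   # "a\\U0000002Eb\\U00000024c"
#     assert Mongo._unescape_key(escaped) == "a.b$c"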
def get_config_details():
uri = None
while True:
uri = input("Enter URI scheme (mongodb or mongodb+srv): ")
if uri is "":
uri = "mongodb"
if uri in ["mongodb", "mongodb+srv"]:
break
else:
print("Invalid URI scheme")
host = input("Enter host address: ")
if uri is "mongodb":
port = int(input("Enter host port: "))
else:
port = 0
admin_uname = input("Enter login username: ")
admin_password = input("Enter login password: ")
db_name = input("Enter mongodb database name: ")
if admin_uname == "":
admin_uname = admin_password = None
ret = {
"HOST": host,
"PORT": port,
"USERNAME": admin_uname,
"PASSWORD": admin_password,
"DB_NAME": db_name,
"URI": uri,
}
return ret | AN-DiscordBot | /AN-DiscordBot-3.9.4.tar.gz/AN-DiscordBot-3.9.4/anbot/core/drivers/red_mongo.py | red_mongo.py |
from pathlib import Path
import copy
import weakref
import logging
from ..json_io import JsonIO
from .red_base import BaseDriver
__all__ = ["JSON"]
_shared_datastore = {}
_driver_counts = {}
_finalizers = []
log = logging.getLogger("redbot.json_driver")
def finalize_driver(cog_name):
if cog_name not in _driver_counts:
return
_driver_counts[cog_name] -= 1
if _driver_counts[cog_name] == 0:
if cog_name in _shared_datastore:
del _shared_datastore[cog_name]
for f in _finalizers:
if not f.alive:
_finalizers.remove(f)
class JSON(BaseDriver):
"""
Subclass of :py:class:`.red_base.BaseDriver`.
.. py:attribute:: file_name
The name of the file in which to store JSON data.
.. py:attribute:: data_path
The path in which to store the file indicated by :py:attr:`file_name`.
"""
def __init__(
self,
cog_name,
identifier,
*,
data_path_override: Path = None,
file_name_override: str = "settings.json"
):
super().__init__(cog_name, identifier)
self.file_name = file_name_override
if data_path_override:
self.data_path = data_path_override
else:
self.data_path = Path.cwd() / "cogs" / ".data" / self.cog_name
self.data_path.mkdir(parents=True, exist_ok=True)
self.data_path = self.data_path / self.file_name
self.jsonIO = JsonIO(self.data_path)
self._load_data()
@property
def data(self):
return _shared_datastore.get(self.cog_name)
@data.setter
def data(self, value):
_shared_datastore[self.cog_name] = value
def _load_data(self):
if self.cog_name not in _driver_counts:
_driver_counts[self.cog_name] = 0
_driver_counts[self.cog_name] += 1
_finalizers.append(weakref.finalize(self, finalize_driver, self.cog_name))
if self.data is not None:
return
try:
self.data = self.jsonIO._load_json()
except FileNotFoundError:
self.data = {}
self.jsonIO._save_json(self.data)
async def get(self, *identifiers: str):
partial = self.data
full_identifiers = (self.unique_cog_identifier, *identifiers)
for i in full_identifiers:
partial = partial[i]
return copy.deepcopy(partial)
async def set(self, *identifiers: str, value=None):
partial = self.data
full_identifiers = (self.unique_cog_identifier, *identifiers)
for i in full_identifiers[:-1]:
if i not in partial:
partial[i] = {}
partial = partial[i]
partial[full_identifiers[-1]] = copy.deepcopy(value)
await self.jsonIO._threadsafe_save_json(self.data)
async def clear(self, *identifiers: str):
partial = self.data
full_identifiers = (self.unique_cog_identifier, *identifiers)
try:
for i in full_identifiers[:-1]:
partial = partial[i]
del partial[full_identifiers[-1]]
except KeyError:
pass
else:
await self.jsonIO._threadsafe_save_json(self.data)
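# Example (sketch): manual use of this driver outside Config. The cog name
# and identifier are hypothetical, and the coroutines must be awaited:
#
#     driver = JSON("MyCog", "12345")
#     await driver.set("GLOBAL", "counter", value=1)
#     assert await driver.get("GLOBAL", "counter") == 1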
def get_config_details(self):
return | AN-DiscordBot | /AN-DiscordBot-3.9.4.tar.gz/AN-DiscordBot-3.9.4/anbot/core/drivers/red_json.py | red_json.py |
import asyncio
import contextlib
from typing import Union, Iterable, Optional
import discord
from .. import commands
from .predicates import ReactionPredicate
_ReactableEmoji = Union[str, discord.Emoji]
async def menu(
ctx: commands.Context,
pages: list,
controls: dict,
message: discord.Message = None,
page: int = 0,
timeout: float = 30.0,
):
"""
An emoji-based menu
.. note:: All pages should be of the same type
.. note:: All functions for handling what a particular emoji does
should be coroutines (i.e. :code:`async def`). Additionally,
they must take all of the parameters of this function, in
addition to a string representing the emoji reacted with.
This parameter should be the last one, and none of the
parameters in the handling functions are optional
Parameters
----------
ctx: commands.Context
The command context
pages: `list` of `str` or `discord.Embed`
The pages of the menu.
controls: dict
A mapping of emoji to the function which handles the action for the
emoji.
message: discord.Message
The message representing the menu. Usually :code:`None` when first opening
the menu
page: int
The current page number of the menu
timeout: float
The time (in seconds) to wait for a reaction
Raises
------
RuntimeError
        If either of the notes above is violated
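    Examples
    --------
    A hedged usage sketch, assuming this is called from a command body
    with an invocation context ``ctx`` in scope::
        from anbot.core.utils.menus import menu, DEFAULT_CONTROLS
        await menu(ctx, ["Page 1", "Page 2", "Page 3"], DEFAULT_CONTROLS)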
"""
if not all(isinstance(x, discord.Embed) for x in pages) and not all(
isinstance(x, str) for x in pages
):
raise RuntimeError("All pages must be of the same type")
for key, value in controls.items():
if not asyncio.iscoroutinefunction(value):
raise RuntimeError("Function must be a coroutine")
current_page = pages[page]
if not message:
if isinstance(current_page, discord.Embed):
message = await ctx.send(embed=current_page)
else:
message = await ctx.send(current_page)
# Don't wait for reactions to be added (GH-1797)
# noinspection PyAsyncCall
start_adding_reactions(message, controls.keys(), ctx.bot.loop)
else:
try:
if isinstance(current_page, discord.Embed):
await message.edit(embed=current_page)
else:
await message.edit(content=current_page)
except discord.NotFound:
return
try:
react, user = await ctx.bot.wait_for(
"reaction_add",
check=ReactionPredicate.with_emojis(tuple(controls.keys()), message, ctx.author),
timeout=timeout,
)
except asyncio.TimeoutError:
try:
await message.clear_reactions()
except discord.Forbidden: # cannot remove all reactions
for key in controls.keys():
await message.remove_reaction(key, ctx.bot.user)
except discord.NotFound:
return
else:
return await controls[react.emoji](
ctx, pages, controls, message, page, timeout, react.emoji
)
async def next_page(
ctx: commands.Context,
pages: list,
controls: dict,
message: discord.Message,
page: int,
timeout: float,
emoji: str,
):
perms = message.channel.permissions_for(ctx.me)
if perms.manage_messages: # Can manage messages, so remove react
with contextlib.suppress(discord.NotFound):
await message.remove_reaction(emoji, ctx.author)
if page == len(pages) - 1:
page = 0 # Loop around to the first item
else:
page = page + 1
return await menu(ctx, pages, controls, message=message, page=page, timeout=timeout)
async def prev_page(
ctx: commands.Context,
pages: list,
controls: dict,
message: discord.Message,
page: int,
timeout: float,
emoji: str,
):
perms = message.channel.permissions_for(ctx.me)
if perms.manage_messages: # Can manage messages, so remove react
with contextlib.suppress(discord.NotFound):
await message.remove_reaction(emoji, ctx.author)
if page == 0:
page = len(pages) - 1 # Loop around to the last item
else:
page = page - 1
return await menu(ctx, pages, controls, message=message, page=page, timeout=timeout)
async def close_menu(
ctx: commands.Context,
pages: list,
controls: dict,
message: discord.Message,
page: int,
timeout: float,
emoji: str,
):
with contextlib.suppress(discord.NotFound):
await message.delete()
def start_adding_reactions(
message: discord.Message,
emojis: Iterable[_ReactableEmoji],
loop: Optional[asyncio.AbstractEventLoop] = None,
) -> asyncio.Task:
"""Start adding reactions to a message.
This is a non-blocking operation - calling this will schedule the
reactions being added, but the calling code will continue to
execute asynchronously. There is no need to await this function.
This is particularly useful if you wish to start waiting for a
reaction whilst the reactions are still being added - in fact,
this is exactly what `menu` uses to do that.
    This spawns an `asyncio.Task` object and schedules it on ``loop``.
    If ``loop`` is omitted, the loop will be retrieved with
    `asyncio.get_event_loop`.
Parameters
----------
message: discord.Message
The message to add reactions to.
emojis : Iterable[Union[str, discord.Emoji]]
The emojis to react to the message with.
loop : Optional[asyncio.AbstractEventLoop]
The event loop.
Returns
-------
asyncio.Task
The task for the coroutine adding the reactions.
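    Examples
    --------
    A minimal sketch, assuming ``msg`` is a message the bot has just sent
    and can add reactions to::
        from anbot.core.utils.predicates import ReactionPredicate
        start_adding_reactions(msg, ReactionPredicate.YES_OR_NO_EMOJIS)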
"""
async def task():
# The task should exit silently if the message is deleted
with contextlib.suppress(discord.NotFound):
for emoji in emojis:
await message.add_reaction(emoji)
if loop is None:
loop = asyncio.get_event_loop()
return loop.create_task(task())
DEFAULT_CONTROLS = {"⬅": prev_page, "❌": close_menu, "➡": next_page}

| AN-DiscordBot | /AN-DiscordBot-3.9.4.tar.gz/AN-DiscordBot-3.9.4/anbot/core/utils/menus.py | menus.py
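The load-bearing idea in `start_adding_reactions` is scheduling the add-loop as a task rather than awaiting it, so the caller can begin waiting for a reaction while reactions are still appearing. A standalone sketch of the same pattern, with a queue standing in for the Discord API:

import asyncio

async def add_all(queue):
    # stands in for adding reactions one at a time
    for emoji in ("⬅", "❌", "➡"):
        await queue.put(emoji)
        await asyncio.sleep(0.01)

async def main():
    queue = asyncio.Queue()
    task = asyncio.create_task(add_all(queue))  # schedule, don't await
    print(await queue.get())  # usable before the remaining emojis arrive
    await task  # tidy up for the demo

asyncio.run(main())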
import re
from typing import Callable, ClassVar, List, Optional, Pattern, Sequence, Tuple, Union, cast
import discord
from anbot.core import commands
_ID_RE = re.compile(r"([0-9]{15,21})$")
_USER_MENTION_RE = re.compile(r"<@!?([0-9]{15,21})>$")
_CHAN_MENTION_RE = re.compile(r"<#([0-9]{15,21})>$")
_ROLE_MENTION_RE = re.compile(r"<@&([0-9]{15,21})>$")
class MessagePredicate(Callable[[discord.Message], bool]):
"""A simple collection of predicates for message events.
These predicates intend to help simplify checks in message events
and reduce boilerplate code.
This class should be created through the provided classmethods.
Instances of this class are callable message predicates, i.e. they
return ``True`` if a message matches the criteria.
All predicates are combined with :meth:`MessagePredicate.same_context`.
Examples
--------
Waiting for a response in the same channel and from the same
author::
await bot.wait_for("message", check=MessagePredicate.same_context(ctx))
Waiting for a response to a yes or no question::
pred = MessagePredicate.yes_or_no(ctx)
await bot.wait_for("message", check=pred)
if pred.result is True:
# User responded "yes"
...
Getting a member object from a user's response::
pred = MessagePredicate.valid_member(ctx)
await bot.wait_for("message", check=pred)
member = pred.result
Attributes
----------
result : Any
The object which the message content matched with. This is
dependent on the predicate used - see each predicate's
    documentation for details; not every method will assign this
attribute. Defaults to ``None``.
"""
def __init__(self, predicate: Callable[["MessagePredicate", discord.Message], bool]) -> None:
self._pred: Callable[["MessagePredicate", discord.Message], bool] = predicate
self.result = None
def __call__(self, message: discord.Message) -> bool:
return self._pred(self, message)
@classmethod
def same_context(
cls,
ctx: Optional[commands.Context] = None,
channel: Optional[discord.TextChannel] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
"""Match if the reaction fits the described context.
Parameters
----------
ctx : Optional[Context]
            The current invocation context.
channel : Optional[discord.TextChannel]
The channel we expect a message in. If unspecified,
defaults to ``ctx.channel``. If ``ctx`` is unspecified
too, the message's channel will be ignored.
        user : Optional[discord.abc.User]
The user we expect a message from. If unspecified,
defaults to ``ctx.author``. If ``ctx`` is unspecified
too, the message's author will be ignored.
Returns
-------
MessagePredicate
The event predicate.
"""
if ctx is not None:
channel = channel or ctx.channel
user = user or ctx.author
return cls(
lambda self, m: (user is None or user.id == m.author.id)
and (channel is None or channel.id == m.channel.id)
)
@classmethod
def cancelled(
cls,
ctx: Optional[commands.Context] = None,
channel: Optional[discord.TextChannel] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
"""Match if the message is ``[p]cancel``.
Parameters
----------
ctx : Optional[Context]
Same as ``ctx`` in :meth:`same_context`.
channel : Optional[discord.TextChannel]
Same as ``channel`` in :meth:`same_context`.
        user : Optional[discord.abc.User]
Same as ``user`` in :meth:`same_context`.
Returns
-------
MessagePredicate
The event predicate.
"""
same_context = cls.same_context(ctx, channel, user)
return cls(
lambda self, m: (same_context(m) and m.content.lower() == f"{ctx.prefix}cancel")
)
@classmethod
def yes_or_no(
cls,
ctx: Optional[commands.Context] = None,
channel: Optional[discord.TextChannel] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
"""Match if the message is "yes"/"y" or "no"/"n".
This will assign ``True`` for *yes*, or ``False`` for *no* to
the `result` attribute.
Parameters
----------
ctx : Optional[Context]
Same as ``ctx`` in :meth:`same_context`.
channel : Optional[discord.TextChannel]
Same as ``channel`` in :meth:`same_context`.
        user : Optional[discord.abc.User]
Same as ``user`` in :meth:`same_context`.
Returns
-------
MessagePredicate
The event predicate.
"""
same_context = cls.same_context(ctx, channel, user)
def predicate(self: MessagePredicate, m: discord.Message) -> bool:
if not same_context(m):
return False
content = m.content.lower()
if content in ("yes", "y"):
self.result = True
elif content in ("no", "n"):
self.result = False
else:
return False
return True
return cls(predicate)
@classmethod
def valid_int(
cls,
ctx: Optional[commands.Context] = None,
channel: Optional[discord.TextChannel] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
"""Match if the response is an integer.
Assigns the response to `result` as an `int`.
Parameters
----------
ctx : Optional[Context]
Same as ``ctx`` in :meth:`same_context`.
channel : Optional[discord.TextChannel]
Same as ``channel`` in :meth:`same_context`.
        user : Optional[discord.abc.User]
Same as ``user`` in :meth:`same_context`.
Returns
-------
MessagePredicate
The event predicate.
"""
same_context = cls.same_context(ctx, channel, user)
def predicate(self: MessagePredicate, m: discord.Message) -> bool:
if not same_context(m):
return False
try:
self.result = int(m.content)
except ValueError:
return False
else:
return True
return cls(predicate)
@classmethod
def valid_float(
cls,
ctx: Optional[commands.Context] = None,
channel: Optional[discord.TextChannel] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
"""Match if the response is a float.
Assigns the response to `result` as a `float`.
Parameters
----------
ctx : Optional[Context]
Same as ``ctx`` in :meth:`same_context`.
channel : Optional[discord.TextChannel]
Same as ``channel`` in :meth:`same_context`.
        user : Optional[discord.abc.User]
Same as ``user`` in :meth:`same_context`.
Returns
-------
MessagePredicate
The event predicate.
"""
same_context = cls.same_context(ctx, channel, user)
def predicate(self: MessagePredicate, m: discord.Message) -> bool:
if not same_context(m):
return False
try:
self.result = float(m.content)
except ValueError:
return False
else:
return True
return cls(predicate)
@classmethod
def positive(
cls,
ctx: Optional[commands.Context] = None,
channel: Optional[discord.TextChannel] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
"""Match if the response is a positive number.
Assigns the response to `result` as a `float`.
Parameters
----------
ctx : Optional[Context]
Same as ``ctx`` in :meth:`same_context`.
channel : Optional[discord.TextChannel]
Same as ``channel`` in :meth:`same_context`.
        user : Optional[discord.abc.User]
Same as ``user`` in :meth:`same_context`.
Returns
-------
MessagePredicate
The event predicate.
"""
same_context = cls.same_context(ctx, channel, user)
def predicate(self: MessagePredicate, m: discord.Message) -> bool:
if not same_context(m):
return False
try:
number = float(m.content)
except ValueError:
return False
else:
if number > 0:
self.result = number
return True
else:
return False
return cls(predicate)
@classmethod
def valid_role(
cls,
ctx: Optional[commands.Context] = None,
channel: Optional[discord.TextChannel] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
"""Match if the response refers to a role in the current guild.
Assigns the matching `discord.Role` object to `result`.
This predicate cannot be used in DM.
Parameters
----------
ctx : Optional[Context]
Same as ``ctx`` in :meth:`same_context`.
channel : Optional[discord.TextChannel]
Same as ``channel`` in :meth:`same_context`.
        user : Optional[discord.abc.User]
Same as ``user`` in :meth:`same_context`.
Returns
-------
MessagePredicate
The event predicate.
"""
same_context = cls.same_context(ctx, channel, user)
guild = cls._get_guild(ctx, channel, cast(discord.Member, user))
def predicate(self: MessagePredicate, m: discord.Message) -> bool:
if not same_context(m):
return False
role = self._find_role(guild, m.content)
if role is None:
return False
self.result = role
return True
return cls(predicate)
@classmethod
def valid_member(
cls,
ctx: Optional[commands.Context] = None,
channel: Optional[discord.TextChannel] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
"""Match if the response refers to a member in the current guild.
Assigns the matching `discord.Member` object to `result`.
This predicate cannot be used in DM.
Parameters
----------
ctx : Optional[Context]
Same as ``ctx`` in :meth:`same_context`.
channel : Optional[discord.TextChannel]
Same as ``channel`` in :meth:`same_context`.
        user : Optional[discord.abc.User]
Same as ``user`` in :meth:`same_context`.
Returns
-------
MessagePredicate
The event predicate.
"""
same_context = cls.same_context(ctx, channel, user)
guild = cls._get_guild(ctx, channel, cast(discord.Member, user))
def predicate(self: MessagePredicate, m: discord.Message) -> bool:
if not same_context(m):
return False
match = _ID_RE.match(m.content) or _USER_MENTION_RE.match(m.content)
if match:
result = guild.get_member(int(match.group(1)))
else:
result = guild.get_member_named(m.content)
if result is None:
return False
self.result = result
return True
return cls(predicate)
@classmethod
def valid_text_channel(
cls,
ctx: Optional[commands.Context] = None,
channel: Optional[discord.TextChannel] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
"""Match if the response refers to a text channel in the current guild.
Assigns the matching `discord.TextChannel` object to `result`.
This predicate cannot be used in DM.
Parameters
----------
ctx : Optional[Context]
Same as ``ctx`` in :meth:`same_context`.
channel : Optional[discord.TextChannel]
Same as ``channel`` in :meth:`same_context`.
        user : Optional[discord.abc.User]
Same as ``user`` in :meth:`same_context`.
Returns
-------
MessagePredicate
The event predicate.
"""
same_context = cls.same_context(ctx, channel, user)
guild = cls._get_guild(ctx, channel, cast(discord.Member, user))
def predicate(self: MessagePredicate, m: discord.Message) -> bool:
if not same_context(m):
return False
match = _ID_RE.match(m.content) or _CHAN_MENTION_RE.match(m.content)
if match:
result = guild.get_channel(int(match.group(1)))
else:
result = discord.utils.get(guild.text_channels, name=m.content)
if not isinstance(result, discord.TextChannel):
return False
self.result = result
return True
return cls(predicate)
@classmethod
def has_role(
cls,
ctx: Optional[commands.Context] = None,
channel: Optional[discord.TextChannel] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
"""Match if the response refers to a role which the author has.
Assigns the matching `discord.Role` object to `result`.
One of ``user`` or ``ctx`` must be supplied. This predicate
cannot be used in DM.
Parameters
----------
ctx : Optional[Context]
Same as ``ctx`` in :meth:`same_context`.
channel : Optional[discord.TextChannel]
Same as ``channel`` in :meth:`same_context`.
        user : Optional[discord.abc.User]
Same as ``user`` in :meth:`same_context`.
Returns
-------
MessagePredicate
The event predicate.
"""
same_context = cls.same_context(ctx, channel, user)
guild = cls._get_guild(ctx, channel, cast(discord.Member, user))
if user is None:
if ctx is None:
raise TypeError(
"One of `user` or `ctx` must be supplied to `MessagePredicate.has_role`."
)
user = ctx.author
def predicate(self: MessagePredicate, m: discord.Message) -> bool:
if not same_context(m):
return False
role = self._find_role(guild, m.content)
if role is None or role not in user.roles:
return False
self.result = role
return True
return cls(predicate)
@classmethod
def equal_to(
cls,
value: str,
ctx: Optional[commands.Context] = None,
channel: Optional[discord.TextChannel] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
"""Match if the response is equal to the specified value.
Parameters
----------
value : str
The value to compare the response with.
ctx : Optional[Context]
Same as ``ctx`` in :meth:`same_context`.
channel : Optional[discord.TextChannel]
Same as ``channel`` in :meth:`same_context`.
        user : Optional[discord.abc.User]
Same as ``user`` in :meth:`same_context`.
Returns
-------
MessagePredicate
The event predicate.
"""
same_context = cls.same_context(ctx, channel, user)
return cls(lambda self, m: same_context(m) and m.content == value)
@classmethod
def lower_equal_to(
cls,
value: str,
ctx: Optional[commands.Context] = None,
channel: Optional[discord.TextChannel] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
"""Match if the response *as lowercase* is equal to the specified value.
Parameters
----------
value : str
The value to compare the response with.
ctx : Optional[Context]
Same as ``ctx`` in :meth:`same_context`.
channel : Optional[discord.TextChannel]
Same as ``channel`` in :meth:`same_context`.
        user : Optional[discord.abc.User]
Same as ``user`` in :meth:`same_context`.
Returns
-------
MessagePredicate
The event predicate.
"""
same_context = cls.same_context(ctx, channel, user)
return cls(lambda self, m: same_context(m) and m.content.lower() == value)
@classmethod
def less(
cls,
value: Union[int, float],
ctx: Optional[commands.Context] = None,
channel: Optional[discord.TextChannel] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
"""Match if the response is less than the specified value.
Parameters
----------
value : Union[int, float]
The value to compare the response with.
ctx : Optional[Context]
Same as ``ctx`` in :meth:`same_context`.
channel : Optional[discord.TextChannel]
Same as ``channel`` in :meth:`same_context`.
        user : Optional[discord.abc.User]
Same as ``user`` in :meth:`same_context`.
Returns
-------
MessagePredicate
The event predicate.
"""
valid_int = cls.valid_int(ctx, channel, user)
valid_float = cls.valid_float(ctx, channel, user)
        return cls(lambda self, m: (valid_int(m) or valid_float(m)) and float(m.content) < value)
@classmethod
def greater(
cls,
value: Union[int, float],
ctx: Optional[commands.Context] = None,
channel: Optional[discord.TextChannel] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
"""Match if the response is greater than the specified value.
Parameters
----------
value : Union[int, float]
The value to compare the response with.
ctx : Optional[Context]
Same as ``ctx`` in :meth:`same_context`.
channel : Optional[discord.TextChannel]
Same as ``channel`` in :meth:`same_context`.
        user : Optional[discord.abc.User]
Same as ``user`` in :meth:`same_context`.
Returns
-------
MessagePredicate
The event predicate.
"""
valid_int = cls.valid_int(ctx, channel, user)
valid_float = cls.valid_float(ctx, channel, user)
        return cls(lambda self, m: (valid_int(m) or valid_float(m)) and float(m.content) > value)
@classmethod
def length_less(
cls,
length: int,
ctx: Optional[commands.Context] = None,
channel: Optional[discord.TextChannel] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
"""Match if the response's length is less than the specified length.
Parameters
----------
length : int
The value to compare the response's length with.
ctx : Optional[Context]
Same as ``ctx`` in :meth:`same_context`.
channel : Optional[discord.TextChannel]
Same as ``channel`` in :meth:`same_context`.
        user : Optional[discord.abc.User]
Same as ``user`` in :meth:`same_context`.
Returns
-------
MessagePredicate
The event predicate.
"""
same_context = cls.same_context(ctx, channel, user)
return cls(lambda self, m: same_context(m) and len(m.content) <= length)
@classmethod
def length_greater(
cls,
length: int,
ctx: Optional[commands.Context] = None,
channel: Optional[discord.TextChannel] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
"""Match if the response's length is greater than the specified length.
Parameters
----------
length : int
The value to compare the response's length with.
ctx : Optional[Context]
Same as ``ctx`` in :meth:`same_context`.
channel : Optional[discord.TextChannel]
Same as ``channel`` in :meth:`same_context`.
user : Optional[discord.TextChannel]
Same as ``user`` in :meth:`same_context`.
Returns
-------
MessagePredicate
The event predicate.
"""
same_context = cls.same_context(ctx, channel, user)
return cls(lambda self, m: same_context(m) and len(m.content) >= length)
@classmethod
def contained_in(
cls,
collection: Sequence[str],
ctx: Optional[commands.Context] = None,
channel: Optional[discord.TextChannel] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
"""Match if the response is contained in the specified collection.
The index of the response in the ``collection`` sequence is
assigned to the `result` attribute.
Parameters
----------
collection : Sequence[str]
The collection containing valid responses.
ctx : Optional[Context]
Same as ``ctx`` in :meth:`same_context`.
channel : Optional[discord.TextChannel]
Same as ``channel`` in :meth:`same_context`.
        user : Optional[discord.abc.User]
Same as ``user`` in :meth:`same_context`.
Returns
-------
MessagePredicate
The event predicate.
"""
same_context = cls.same_context(ctx, channel, user)
def predicate(self: MessagePredicate, m: discord.Message) -> bool:
if not same_context(m):
return False
try:
self.result = collection.index(m.content)
except ValueError:
return False
else:
return True
return cls(predicate)
@classmethod
def lower_contained_in(
cls,
collection: Sequence[str],
ctx: Optional[commands.Context] = None,
channel: Optional[discord.TextChannel] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
"""Same as :meth:`contained_in`, but the response is set to lowercase before matching.
Parameters
----------
collection : Sequence[str]
The collection containing valid lowercase responses.
ctx : Optional[Context]
Same as ``ctx`` in :meth:`same_context`.
channel : Optional[discord.TextChannel]
Same as ``channel`` in :meth:`same_context`.
        user : Optional[discord.abc.User]
Same as ``user`` in :meth:`same_context`.
Returns
-------
MessagePredicate
The event predicate.
"""
same_context = cls.same_context(ctx, channel, user)
def predicate(self: MessagePredicate, m: discord.Message) -> bool:
if not same_context(m):
return False
try:
self.result = collection.index(m.content.lower())
except ValueError:
return False
else:
return True
return cls(predicate)
@classmethod
def regex(
cls,
pattern: Union[Pattern[str], str],
ctx: Optional[commands.Context] = None,
channel: Optional[discord.TextChannel] = None,
user: Optional[discord.abc.User] = None,
) -> "MessagePredicate":
"""Match if the response matches the specified regex pattern.
This predicate will use `re.search` to find a match. The
resulting `match object <match-objects>` will be assigned
to `result`.
Parameters
----------
pattern : Union[`pattern object <re-objects>`, str]
The pattern to search for in the response.
ctx : Optional[Context]
Same as ``ctx`` in :meth:`same_context`.
channel : Optional[discord.TextChannel]
Same as ``channel`` in :meth:`same_context`.
        user : Optional[discord.abc.User]
Same as ``user`` in :meth:`same_context`.
Returns
-------
MessagePredicate
The event predicate.
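        Examples
        --------
        A hedged sketch, waiting for a message containing a number
        (``ctx`` is the current invocation context)::
            pred = MessagePredicate.regex(r"\d+", ctx)
            await ctx.bot.wait_for("message", check=pred)
            number = int(pred.result.group(0))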
"""
same_context = cls.same_context(ctx, channel, user)
def predicate(self: MessagePredicate, m: discord.Message) -> bool:
if not same_context(m):
return False
if isinstance(pattern, str):
pattern_obj = re.compile(pattern)
else:
pattern_obj = pattern
match = pattern_obj.search(m.content)
if match:
self.result = match
return True
return False
return cls(predicate)
@staticmethod
def _find_role(guild: discord.Guild, argument: str) -> Optional[discord.Role]:
match = _ID_RE.match(argument) or _ROLE_MENTION_RE.match(argument)
if match:
result = guild.get_role(int(match.group(1)))
else:
result = discord.utils.get(guild.roles, name=argument)
return result
@staticmethod
def _get_guild(
ctx: commands.Context, channel: discord.TextChannel, user: discord.Member
) -> discord.Guild:
if ctx is not None:
return ctx.guild
elif channel is not None:
return channel.guild
elif user is not None:
return user.guild
class ReactionPredicate(Callable[[discord.Reaction, discord.abc.User], bool]):
"""A collection of predicates for reaction events.
All checks are combined with :meth:`ReactionPredicate.same_context`.
Examples
--------
Confirming a yes/no question with a tick/cross reaction::
from anbot.core.utils.predicates import ReactionPredicate
from anbot.core.utils.menus import start_adding_reactions
msg = await ctx.send("Yes or no?")
start_adding_reactions(msg, ReactionPredicate.YES_OR_NO_EMOJIS)
pred = ReactionPredicate.yes_or_no(msg, ctx.author)
await ctx.bot.wait_for("reaction_add", check=pred)
if pred.result is True:
# User responded with tick
...
else:
# User responded with cross
...
Waiting for the first reaction from any user with one of the first
5 letters of the alphabet::
from anbot.core.utils.predicates import ReactionPredicate
from anbot.core.utils.menus import start_adding_reactions
msg = await ctx.send("React to me!")
emojis = ReactionPredicate.ALPHABET_EMOJIS[:5]
start_adding_reactions(msg, emojis)
pred = ReactionPredicate.with_emojis(emojis, msg)
await ctx.bot.wait_for("reaction_add", check=pred)
# pred.result is now the index of the letter in `emojis`
Attributes
----------
result : Any
The object which the message content matched with. This is
dependent on the predicate used - see each predicate's
    documentation for details; not every method will assign this
attribute. Defaults to ``None``.
"""
YES_OR_NO_EMOJIS: ClassVar[Tuple[str, str]] = (
"\N{WHITE HEAVY CHECK MARK}",
"\N{NEGATIVE SQUARED CROSS MARK}",
)
"""Tuple[str, str] : A tuple containing the tick emoji and cross emoji, in that order."""
ALPHABET_EMOJIS: ClassVar[List[str]] = [
chr(code)
for code in range(
ord("\N{REGIONAL INDICATOR SYMBOL LETTER A}"),
ord("\N{REGIONAL INDICATOR SYMBOL LETTER Z}") + 1,
)
]
"""List[str] : A list of all 26 alphabetical letter emojis."""
NUMBER_EMOJIS: ClassVar[List[str]] = [
chr(code) + "\N{COMBINING ENCLOSING KEYCAP}" for code in range(ord("0"), ord("9") + 1)
]
"""List[str] : A list of all single-digit number emojis, 0 through 9."""
def __init__(
self, predicate: Callable[["ReactionPredicate", discord.Reaction, discord.abc.User], bool]
) -> None:
self._pred: Callable[
["ReactionPredicate", discord.Reaction, discord.abc.User], bool
] = predicate
self.result = None
def __call__(self, reaction: discord.Reaction, user: discord.abc.User) -> bool:
return self._pred(self, reaction, user)
# noinspection PyUnusedLocal
@classmethod
def same_context(
cls, message: Optional[discord.Message] = None, user: Optional[discord.abc.User] = None
) -> "ReactionPredicate":
"""Match if a reaction fits the described context.
This will ignore reactions added by the bot user, regardless
of whether or not ``user`` is supplied.
Parameters
----------
message : Optional[discord.Message]
The message which we expect a reaction to. If unspecified,
the reaction's message will be ignored.
user : Optional[discord.abc.User]
The user we expect to react. If unspecified, the user who
added the reaction will be ignored.
Returns
-------
ReactionPredicate
The event predicate.
"""
        # noinspection PyProtectedMember
        # ``message`` is optional; without it we cannot know the bot's ID,
        # so only filter out the bot's own reactions when it is provided.
        me_id = message._state.self_id if message is not None else None
        return cls(
            lambda self, r, u: u.id != me_id
            and (message is None or r.message.id == message.id)
            and (user is None or u.id == user.id)
        )
@classmethod
def with_emojis(
cls,
emojis: Sequence[Union[str, discord.Emoji, discord.PartialEmoji]],
message: Optional[discord.Message] = None,
user: Optional[discord.abc.User] = None,
) -> "ReactionPredicate":
"""Match if the reaction is one of the specified emojis.
Parameters
----------
emojis : Sequence[Union[str, discord.Emoji, discord.PartialEmoji]]
The emojis of which one we expect to be reacted.
message : discord.Message
Same as ``message`` in :meth:`same_context`.
user : Optional[discord.abc.User]
Same as ``user`` in :meth:`same_context`.
Returns
-------
ReactionPredicate
The event predicate.
"""
same_context = cls.same_context(message, user)
def predicate(self: ReactionPredicate, r: discord.Reaction, u: discord.abc.User):
if not same_context(r, u):
return False
try:
self.result = emojis.index(r.emoji)
except ValueError:
return False
else:
return True
return cls(predicate)
@classmethod
def yes_or_no(
cls, message: Optional[discord.Message] = None, user: Optional[discord.abc.User] = None
) -> "ReactionPredicate":
"""Match if the reaction is a tick or cross emoji.
        The emojis used are in
`ReactionPredicate.YES_OR_NO_EMOJIS`.
This will assign ``True`` for *yes*, or ``False`` for *no* to
the `result` attribute.
Parameters
----------
message : discord.Message
Same as ``message`` in :meth:`same_context`.
user : Optional[discord.abc.User]
Same as ``user`` in :meth:`same_context`.
Returns
-------
ReactionPredicate
The event predicate.
"""
same_context = cls.same_context(message, user)
def predicate(self: ReactionPredicate, r: discord.Reaction, u: discord.abc.User) -> bool:
if not same_context(r, u):
return False
try:
self.result = not bool(self.YES_OR_NO_EMOJIS.index(r.emoji))
except ValueError:
return False
else:
return True
        return cls(predicate)

| AN-DiscordBot | /AN-DiscordBot-3.9.4.tar.gz/AN-DiscordBot-3.9.4/anbot/core/utils/predicates.py | predicates.py
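The pattern behind both predicate classes is small enough to show standalone: a callable object that filters events and stashes the parsed value on itself, so the caller can read `pred.result` afterwards. Illustrative names only:

class IntPredicate:
    def __init__(self):
        self.result = None

    def __call__(self, content: str) -> bool:
        # reject non-matching input, remember the parsed value otherwise
        try:
            self.result = int(content)
        except ValueError:
            return False
        return True

pred = IntPredicate()
for incoming in ("hello", "not yet", "42"):
    if pred(incoming):
        break
print(pred.result)  # 42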
import discord
from datetime import datetime
from anbot.core.utils.chat_formatting import pagify
import io
import weakref
from typing import List, Optional
from .common_filters import filter_mass_mentions
_instances = weakref.WeakValueDictionary({})
class TunnelMeta(type):
"""
    Metaclass that lets us prevent having multiple tunnels with the
    same places involved.
"""
def __call__(cls, *args, **kwargs):
lockout_tuple = ((kwargs.get("sender"), kwargs.get("origin")), kwargs.get("recipient"))
if lockout_tuple in _instances:
return _instances[lockout_tuple]
# this is needed because weakvalue dicts can
# change size without warning if an object is discarded
# it can raise a runtime error, so ..
while True:
try:
if not (
any(lockout_tuple[0] == x[0] for x in _instances.keys())
or any(lockout_tuple[1] == x[1] for x in _instances.keys())
):
# if this isn't temporarily stored, the weakref dict
# will discard this before the return statement,
# causing a key error
temp = super(TunnelMeta, cls).__call__(*args, **kwargs)
_instances[lockout_tuple] = temp
return temp
            except RuntimeError:
                # a WeakValueDictionary can change size during iteration,
                # which raises RuntimeError; just retry
                continue
else:
return None
class Tunnel(metaclass=TunnelMeta):
"""
A tunnel interface for messages
This will return None on init if the destination
or source + origin pair is already in use, or the
existing tunnel object if one exists for the designated
parameters
Attributes
----------
sender: `discord.Member`
The person who opened the tunnel
origin: `discord.TextChannel`
The channel in which it was opened
recipient: `discord.User`
The user on the other end of the tunnel
"""
def __init__(
self, *, sender: discord.Member, origin: discord.TextChannel, recipient: discord.User
):
self.sender = sender
self.origin = origin
self.recipient = recipient
self.last_interaction = datetime.utcnow()
async def react_close(self, *, uid: int, message: str = ""):
send_to = self.origin if uid == self.sender.id else self.sender
closer = next(filter(lambda x: x.id == uid, (self.sender, self.recipient)), None)
await send_to.send(filter_mass_mentions(message.format(closer=closer)))
@property
def members(self):
return self.sender, self.recipient
@property
def minutes_since(self):
        return int((datetime.utcnow() - self.last_interaction).total_seconds() / 60)
@staticmethod
async def message_forwarder(
*,
destination: discord.abc.Messageable,
content: str = None,
embed=None,
files: Optional[List[discord.File]] = None
) -> List[discord.Message]:
"""
This does the actual sending, use this instead of a full tunnel
if you are using command initiated reactions instead of persistent
event based ones
Parameters
----------
destination: discord.abc.Messageable
Where to send
content: str
The message content
embed: discord.Embed
The embed to send
files: Optional[List[discord.File]]
A list of files to send.
Returns
-------
List[discord.Message]
The messages sent as a result.
Raises
------
discord.Forbidden
see `discord.abc.Messageable.send`
discord.HTTPException
see `discord.abc.Messageable.send`
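        Examples
        --------
        A hedged sketch; ``channel`` and ``long_text`` are assumed to
        exist in the calling scope::
            sent = await Tunnel.message_forwarder(
                destination=channel, content=long_text
            )
            last_id = sent[-1].id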
"""
rets = []
if content:
for page in pagify(content):
rets.append(await destination.send(page, files=files, embed=embed))
                # only attach files and embed to the first page sent;
                # clearing the names (rather than ``del``) keeps later
                # loop iterations valid
                if files:
                    files = None
                if embed:
                    embed = None
elif embed or files:
rets.append(await destination.send(files=files, embed=embed))
return rets
@staticmethod
async def files_from_attatch(m: discord.Message) -> List[discord.File]:
"""
        Makes a list of file objects from a message.
        Returns an empty list if there are none, or if the sum of
        file sizes is too large for the bot to send.
Parameters
---------
m: `discord.Message`
A message to get attachments from
Returns
-------
list of `discord.File`
A list of `discord.File` objects
"""
files = []
max_size = 8 * 1000 * 1000
if m.attachments and sum(a.size for a in m.attachments) <= max_size:
for a in m.attachments:
_fp = io.BytesIO()
await a.save(_fp)
files.append(discord.File(_fp, filename=a.filename))
return files
async def communicate(
self, *, message: discord.Message, topic: str = None, skip_message_content: bool = False
):
"""
Forwards a message.
Parameters
----------
message : `discord.Message`
The message to forward
topic : `str`
A string to prepend
skip_message_content : `bool`
If this flag is set, only the topic will be sent
Returns
-------
        `int`, `int`
            a pair of ints: the id of the last message the bot sent
            while forwarding, followed by the id of the message which
            was forwarded. Useful if waiting for reactions.
Raises
------
discord.Forbidden
This should only happen if the user's DMs are disabled
the bot can't upload at the origin channel
or can't add reactions there.
"""
if message.channel == self.origin and message.author == self.sender:
send_to = self.recipient
elif message.author == self.recipient and isinstance(message.channel, discord.DMChannel):
send_to = self.origin
else:
return None
if not skip_message_content:
content = "\n".join((topic, message.content)) if topic else message.content
else:
content = topic
if message.attachments:
attach = await self.files_from_attatch(message)
if not attach:
await message.channel.send(
"Could not forward attatchments. "
"Total size of attachments in a single "
"message must be less than 8MB."
)
else:
attach = []
rets = await self.message_forwarder(destination=send_to, content=content, files=attach)
await message.add_reaction("\N{WHITE HEAVY CHECK MARK}")
await message.add_reaction("\N{NEGATIVE SQUARED CROSS MARK}")
self.last_interaction = datetime.utcnow()
await rets[-1].add_reaction("\N{NEGATIVE SQUARED CROSS MARK}")
        return [rets[-1].id, message.id]

| AN-DiscordBot | /AN-DiscordBot-3.9.4.tar.gz/AN-DiscordBot-3.9.4/anbot/core/utils/tunnel.py | tunnel.py
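A standalone sketch of the deduplication idea behind `TunnelMeta`, using `__new__` instead of a metaclass: a `WeakValueDictionary` hands back the live instance for a key instead of creating a duplicate, and entries vanish once nothing references them. Names are illustrative:

import weakref

_instances = weakref.WeakValueDictionary()

class Session:
    def __new__(cls, key):
        try:
            return _instances[key]
        except KeyError:
            obj = super().__new__(cls)
            obj.key = key
            _instances[key] = obj
            return obj

a = Session("sender-1:recipient-1")
b = Session("sender-1:recipient-1")
print(a is b)  # True: the second call returned the existing tunnel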
import asyncio
from datetime import timedelta
from typing import List, Iterable, Union, TYPE_CHECKING, Dict
import discord
if TYPE_CHECKING:
from .. import Config
from ..bot import AN
from ..commands import Context
async def mass_purge(messages: List[discord.Message], channel: discord.TextChannel):
"""Bulk delete messages from a channel.
If more than 100 messages are supplied, the bot will delete 100 messages at
a time, sleeping between each action.
Note
----
Messages must not be older than 14 days, and the bot must not be a user
account.
Parameters
----------
messages : `list` of `discord.Message`
The messages to bulk delete.
channel : discord.TextChannel
The channel to delete messages from.
Raises
------
discord.Forbidden
You do not have proper permissions to delete the messages or you’re not
using a bot account.
discord.HTTPException
Deleting the messages failed.
"""
while messages:
if len(messages) > 1:
await channel.delete_messages(messages[:100])
messages = messages[100:]
else:
await messages[0].delete()
messages = []
await asyncio.sleep(1.5)
async def slow_deletion(messages: Iterable[discord.Message]):
"""Delete a list of messages one at a time.
Any exceptions raised when trying to delete the message will be silenced.
Parameters
----------
messages : `iterable` of `discord.Message`
The messages to delete.
"""
for message in messages:
try:
await message.delete()
except discord.HTTPException:
pass
def get_audit_reason(author: discord.Member, reason: str = None):
"""Construct a reason to appear in the audit log.
Parameters
----------
author : discord.Member
The author behind the audit log action.
reason : str
The reason behind the audit log action.
Returns
-------
str
The formatted audit log reason.
"""
return (
"Action requested by {} (ID {}). Reason: {}".format(author, author.id, reason)
if reason
else "Action requested by {} (ID {}).".format(author, author.id)
)
async def is_allowed_by_hierarchy(
bot: "AN", settings: "Config", guild: discord.Guild, mod: discord.Member, user: discord.Member
):
if not await settings.guild(guild).respect_hierarchy():
return True
is_special = mod == guild.owner or await bot.is_owner(mod)
return mod.top_role.position > user.top_role.position or is_special
async def is_mod_or_superior(
bot: "AN", obj: Union[discord.Message, discord.Member, discord.Role]
):
"""Check if an object has mod or superior permissions.
If a message is passed, its author's permissions are checked. If a role is
passed, it simply checks if it is one of either the admin or mod roles.
Parameters
----------
bot : anbot.core.bot.AN
The bot object.
obj : `discord.Message` or `discord.Member` or `discord.Role`
The object to check permissions for.
Returns
-------
bool
:code:`True` if the object has mod permissions.
Raises
------
TypeError
If the wrong type of ``obj`` was passed.
"""
user = None
if isinstance(obj, discord.Message):
user = obj.author
elif isinstance(obj, discord.Member):
user = obj
elif isinstance(obj, discord.Role):
pass
else:
raise TypeError("Only messages, members or roles may be passed")
server = obj.guild
admin_role_id = await bot.db.guild(server).admin_role()
mod_role_id = await bot.db.guild(server).mod_role()
if isinstance(obj, discord.Role):
return obj.id in [admin_role_id, mod_role_id]
if await bot.is_owner(user):
return True
elif discord.utils.find(lambda r: r.id in (admin_role_id, mod_role_id), user.roles):
return True
else:
return False
def strfdelta(delta: timedelta):
"""Format a timedelta object to a message with time units.
Parameters
----------
delta : datetime.timedelta
The duration to parse.
Returns
-------
str
A message representing the timedelta with units.
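    Examples
    --------
    A quick sketch::
        strfdelta(timedelta(days=1, hours=2, minutes=3))
        # -> '1 day 2 hrs 3 min'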
"""
s = []
if delta.days:
ds = "%i day" % delta.days
if delta.days > 1:
ds += "s"
s.append(ds)
hrs, rem = divmod(delta.seconds, 60 * 60)
if hrs:
hs = "%i hr" % hrs
if hrs > 1:
hs += "s"
s.append(hs)
mins, secs = divmod(rem, 60)
if mins:
s.append("%i min" % mins)
if secs:
s.append("%i sec" % secs)
return " ".join(s)
async def is_admin_or_superior(
bot: "AN", obj: Union[discord.Message, discord.Member, discord.Role]
):
"""Same as `is_mod_or_superior` except for admin permissions.
If a message is passed, its author's permissions are checked. If a role is
passed, it simply checks if it is the admin role.
Parameters
----------
bot : anbot.core.bot.AN
The bot object.
obj : `discord.Message` or `discord.Member` or `discord.Role`
The object to check permissions for.
Returns
-------
bool
:code:`True` if the object has admin permissions.
Raises
------
TypeError
If the wrong type of ``obj`` was passed.
"""
user = None
if isinstance(obj, discord.Message):
user = obj.author
elif isinstance(obj, discord.Member):
user = obj
elif isinstance(obj, discord.Role):
pass
else:
raise TypeError("Only messages, members or roles may be passed")
admin_role_id = await bot.db.guild(obj.guild).admin_role()
if isinstance(obj, discord.Role):
return obj.id == admin_role_id
if user and await bot.is_owner(user):
return True
elif discord.utils.get(user.roles, id=admin_role_id):
return True
else:
return False
async def check_permissions(ctx: "Context", perms: Dict[str, bool]) -> bool:
"""Check if the author has required permissions.
This will always return ``True`` if the author is a bot owner, or
has the ``administrator`` permission. If ``perms`` is empty, this
will only check if the user is a bot owner.
Parameters
----------
ctx : Context
        The command invocation context to check.
perms : Dict[str, bool]
A dictionary mapping permissions to their required states.
Valid permission names are those listed as properties of
the `discord.Permissions` class.
Returns
-------
bool
``True`` if the author has the required permissions.
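    Examples
    --------
    A hedged sketch inside a command body, with ``ctx`` the invocation
    context::
        if await check_permissions(ctx, {"manage_messages": True}):
            ...  # proceed with the privileged action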
"""
if await ctx.bot.is_owner(ctx.author):
return True
elif not perms:
return False
resolved = ctx.channel.permissions_for(ctx.author)
return resolved.administrator or all(
getattr(resolved, name, None) == value for name, value in perms.items()
    )

| AN-DiscordBot | /AN-DiscordBot-3.9.4.tar.gz/AN-DiscordBot-3.9.4/anbot/core/utils/mod.py | mod.py
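A standalone sketch of the batching in `mass_purge`: chunks of at most 100 go through the bulk path, with a pause between calls to stay under rate limits (the single-message fallback is omitted here). The stand-in coroutine below replaces `channel.delete_messages`, and the sleep is shortened for the demo:

import asyncio

async def delete_batch(batch):
    print(f"bulk-deleted {len(batch)} messages")  # stand-in for the API call

async def purge(messages):
    while messages:
        await delete_batch(messages[:100])
        messages = messages[100:]
        await asyncio.sleep(0.01)  # 1.5s in the real code

asyncio.run(purge(list(range(250))))  # -> batches of 100, 100, 50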
import re
__all__ = [
"URL_RE",
"INVITE_URL_RE",
"MASS_MENTION_RE",
"filter_urls",
"filter_invites",
"filter_mass_mentions",
"filter_various_mentions",
"normalize_smartquotes",
"escape_spoilers",
"escape_spoilers_and_mass_mentions",
]
# regexes
URL_RE = re.compile(r"(https?|s?ftp)://(\S+)", re.I)
INVITE_URL_RE = re.compile(r"(discord.gg|discordapp.com/invite|discord.me)(\S+)", re.I)
MASS_MENTION_RE = re.compile(r"(@)(?=everyone|here)") # This only matches the @ for sanitizing
OTHER_MENTION_RE = re.compile(r"(<)(@[!&]?|#)(\d+>)")
SMART_QUOTE_REPLACEMENT_DICT = {
"\u2018": "'", # Left single quote
"\u2019": "'", # Right single quote
"\u201C": '"', # Left double quote
"\u201D": '"', # Right double quote
}
SMART_QUOTE_REPLACE_RE = re.compile("|".join(SMART_QUOTE_REPLACEMENT_DICT.keys()))
SPOILER_CONTENT_RE = re.compile(
r"(?s)(?<!\\)(?P<OPEN>\|{2})(?P<SPOILERED>.*?)(?<!\\)(?P<CLOSE>\|{2})"
)
# convenience wrappers
def filter_urls(to_filter: str) -> str:
"""Get a string with URLs sanitized.
This will match any URLs starting with these protocols:
- ``http://``
- ``https://``
- ``ftp://``
- ``sftp://``
Parameters
----------
to_filter : str
The string to filter.
Returns
-------
str
The sanitized string.
"""
return URL_RE.sub("[SANITIZED URL]", to_filter)
def filter_invites(to_filter: str) -> str:
"""Get a string with discord invites sanitized.
Will match any discord.gg, discordapp.com/invite, or discord.me
invite URL.
Parameters
----------
to_filter : str
The string to filter.
Returns
-------
str
The sanitized string.
"""
return INVITE_URL_RE.sub("[SANITIZED INVITE]", to_filter)
def filter_mass_mentions(to_filter: str) -> str:
"""Get a string with mass mentions sanitized.
Will match any *here* and/or *everyone* mentions.
Parameters
----------
to_filter : str
The string to filter.
Returns
-------
str
The sanitized string.
"""
return MASS_MENTION_RE.sub("@\u200b", to_filter)
def filter_various_mentions(to_filter: str) -> str:
"""
Get a string with role, user, and channel mentions sanitized.
This is mainly for use on user display names, not message content,
and should be applied sparingly.
Parameters
----------
to_filter : str
The string to filter.
Returns
-------
str
The sanitized string.
"""
return OTHER_MENTION_RE.sub(r"\1\\\2\3", to_filter)
def normalize_smartquotes(to_normalize: str) -> str:
"""
Get a string with smart quotes replaced with normal ones
Parameters
----------
to_normalize : str
The string to normalize.
Returns
-------
str
The normalized string.
"""
def replacement_for(obj):
return SMART_QUOTE_REPLACEMENT_DICT.get(obj.group(0), "")
return SMART_QUOTE_REPLACE_RE.sub(replacement_for, to_normalize)
def escape_spoilers(content: str) -> str:
"""
Get a string with spoiler syntax escaped.
Parameters
----------
content : str
The string to escape.
Returns
-------
str
The escaped string.
"""
return SPOILER_CONTENT_RE.sub(r"\\\g<OPEN>\g<SPOILERED>\\\g<CLOSE>", content)
def escape_spoilers_and_mass_mentions(content: str) -> str:
"""
Get a string with spoiler syntax and mass mentions escaped
Parameters
----------
content : str
The string to escape.
Returns
-------
str
The escaped string.
"""
    return escape_spoilers(filter_mass_mentions(content))

| AN-DiscordBot | /AN-DiscordBot-3.9.4.tar.gz/AN-DiscordBot-3.9.4/anbot/core/utils/common_filters.py | common_filters.py
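A quick standalone check of the mass-mention lookahead above: only the `@` immediately before `everyone` or `here` is rewritten, and the inserted zero-width space stops Discord from parsing it as a mention.

import re

MASS_MENTION_RE = re.compile(r"(@)(?=everyone|here)")  # same pattern as above

print(MASS_MENTION_RE.sub("@\u200b", "hi @everyone and @here, not @alice"))
# -> 'hi @\u200beveryone and @\u200bhere, not @alice'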