| column | dtype | stats |
|---|---|---|
| file_name | large_string | lengths 4–140 |
| prefix | large_string | lengths 0–12.1k |
| suffix | large_string | lengths 0–12k |
| middle | large_string | lengths 0–7.51k |
| fim_type | large_string | 4 classes |
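Each row is a fill-in-the-middle (FIM) sample: concatenating `prefix + middle + suffix` restores the original source span, and `fim_type` records what kind of span was masked (`identifier_name`, `identifier_body`, `conditional_block`, or `random_line_split`). Below is a minimal sketch of how such rows could be reassembled; the dataset path and the use of the Hugging Face `datasets` package are assumptions for illustration, not part of this dump:

```python
# Minimal sketch: rebuild the original code span from a FIM row.
# Assumes rows are dicts with the five columns listed above.
from datasets import load_dataset

def reassemble(row: dict) -> str:
    # prefix + middle + suffix restores the masked source span
    return row["prefix"] + row["middle"] + row["suffix"]

# "path/to/this-fim-dataset" is a placeholder, not the real dataset id.
ds = load_dataset("path/to/this-fim-dataset", split="train")
for row in ds.select(range(3)):
    print(row["file_name"], "-", row["fim_type"])
    print(reassemble(row)[:200])  # first 200 chars of the restored span
```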
sandbox.ts | fim_type: identifier_body

 * @property {Map} $instances Running instances storage
* @returns {Sandbox}
*/
export class Sandbox<S = {}> extends Component<S, ISandboxOptions> {
public static SB_DATA_ATTR = 'data-yuzu-sb';
public defaultOptions(): ISandboxOptions {
return {
components: [],
context: createContext(),
id: '',
root: document.body,
};
}
public $id: string;
public $ctx?: IContext;
public $registry: ISandboxRegistryEntry[] = [];
public $instances = new Map<
string | entrySelectorFn,
Component<any, any>[]
>();
/**
* Creates a sandbox instance.
*
* @constructor
*/
public constructor(options: Partial<ISandboxOptions> = {}) {
super(options);
const { components = [], id } = this.options;
this.$id = id || nextSbUid('_sbx-');
components.forEach((config) => {
if (!Array.isArray(config)) {
if (config.root) {
this.register({ component: config, selector: config.root });
}
if (process.env.NODE_ENV !== 'production') {
!config.root &&
this.$warn(
`Skipping component ${config.displayName ||
config.name} because static "root" selector is missing`,
);
}
} else {
const [component, params = {}] = config;
const selector = component.root || params.selector;
if (selector) {
this.register({ component, selector, ...params });
}
if (process.env.NODE_ENV !== 'production') {
!selector &&
this.$warn(
`Skipping component ${component.displayName ||
component.name} because a static "root" selector is missing and no "selector" param is passed-in`,
);
}
}
});
return this;
}
/**
* ```js
* register(params)
* ```
*
* Registers a new component into the sandbox. The registered components
 * will be traversed on `.mount()`, initializing every matching component.
*
* @param {object} params Every property other than `component` and `selector` will be used as component option
* @param {Component} params.component Component constructor
* @param {string} params.selector Child component root CSS selector
* @example
* sandbox.register({
* component: Counter,
* selector: '.Counter',
* theme: 'dark' // <-- instance options
* });
*/
public register<C extends Component<any, any>>(params: {
component: IComponentConstructable<C>;
selector: string | entrySelectorFn;
[key: string]: any;
}): void {
invariant(
Component.isComponent(params.component),
'Missing or invalid `component` property',
);
invariant(
typeof params.selector === 'string' ||
typeof params.selector === 'function',
'Missing `selector` property',
);
this.$registry.push(params);
}
/**
* ```js
* start([data])
* ```
*
* **DEPRECATED!** Use `sandbox.mount(root)` instead.
*
* Starts the sandbox with an optional context.
*
* The store will be available inside each component at `this.$context`.
*
* @deprecated
* @param {object} [data] Optional context data object to be injected into the child components.
* @fires Sandbox#beforeStart
* @fires Sandbox#start Events dispatched after all components are initialized
* @returns {Sandbox}
* @example
* sandbox.start();
*
* // with context data
* sandbox.start({ globalTheme: 'dark' });
*/
public start(data = {}): this {
Object.defineProperty(this, '$legacyStart', { value: true });
if (process.env.NODE_ENV !== 'production') {
this.$warn(`Sandbox.start is deprecated. Use the "mount" method instead`);
}
this.mount(this.options.root);
this.setup();
this.$ctx && this.$ctx.update(data);
this.discover();
return this;
}
/**
* ```js
* mount([el], [state])
* ```
*
* Enhances `Component.mount()` by firing the child components discovery logic.
 * By default it will use `document.body` as the mount element.
*
* @param {string|Element} el Component's root element
* @param {object|null} [state={}] Initial state
* @fires Sandbox#beforeStart
* @fires Sandbox#start Events dispatched after all components are initialized
* @returns {Sandbox}
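	 * @example
	 * // minimal usage sketch (assumes an element matching '#app' exists in the DOM):
	 * sandbox.mount('#app');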
*/
public mount(el: string | Element, state: Partial<S> | null = {}): this {
super.mount(el, state);
this.$el.setAttribute(Sandbox.SB_DATA_ATTR, '');
if (!this.hasOwnProperty('$legacyStart')) {
this.setup();
this.discover();
}
return this;
}
/**
 * Sets up the sandbox context passed in via the options.
*
* @ignore
*/
public setup(): void {
this.$ctx = this.options.context;
this.$ctx.inject(this);
}
/**
* Initializes the sandbox child components.
*
* @ignore
* @returns {Promise}
*/
public discover(): Promise<void> {
invariant(isElement(this.$el), '"this.$el" is not a DOM element');
this.emit('beforeStart');
const sbSelector = `[${Sandbox.SB_DATA_ATTR}]`;
const ret = this.$registry.map(
async ({ component: ComponentConstructor, selector, ...options }) => {
if (this.$instances.has(selector)) {
this.$warn(
`Component ${ComponentConstructor} already initialized on ${selector}`,
);
return;
}
const targets = this.resolveSelector(selector);
let instances: Promise<Component<any, any>>[] | undefined;
if (targets === true) {
instances = [this.createInstance(ComponentConstructor, options)];
} else if (Array.isArray(targets)) {
const { $el } = this;
instances = targets
.filter((el) => {
return (
isElement(el) &&
!el.dataset.skip &&
!el.closest('[data-skip]') &&
el.closest(sbSelector) === $el
);
})
.map((el) => {
return this.createInstance(ComponentConstructor, options, el);
});
}
if (instances) {
this.$instances.set(selector, await Promise.all(instances));
}
return true;
},
);
return Promise.all(ret).then(() => {
this.emit('start');
});
}
/**
* Resolves a configured component selector to a list of DOM nodes or a boolean (for detached components)
*
* @ignore
* @param {string|function} selector Selector string or function.
* @returns {HTMLElement[]|boolean}
*/
public resolveSelector(
selector: string | entrySelectorFn,
): HTMLElement[] | boolean {
let targets = evaluate(selector, this);
if (typeof targets === 'string') {
targets = this.findNodes(targets) as HTMLElement[];
}
return targets;
}
/**
* Creates a component instance.
 * Reads inline component options from the passed-in root DOM element.
*
* @ignore
* @param {object} options instance options
* @param {HTMLElement} [el] Root element
* @returns {Component}
*/
public createInstance<C extends Component<any, any>>(
ComponentConstructor: IComponentConstructable<C>,
options: Record<string, any>,
el?: HTMLElement,
): Promise<C> {
const inlineOptions = el ? datasetParser(el) : {};
return this.setRef({
id: nextChildUid(this.$id + '-c.'),
...options,
...inlineOptions,
component: ComponentConstructor,
el,
});
}
/**
* ```js
* stop()
* ```
*
* **DEPRECATED!** Use `sandbox.destroy()` instead.
*
* Stops every running component, clears sandbox events and destroys the instance.
*
* @deprecated
* @fires Sandbox#beforeStop
* @fires Sandbox#stop
* @returns {Promise<void>}
* @example
* sandbox.stop();
*/
public async stop(): Promise<void> {
if (process.env.NODE_ENV !== 'production') {
this.$warn(
`Sandbox.stop is deprecated. Use the "destroy" method instead`,
);
}
return this.destroy();
}
/**
* ```js
* destroy()
* ```
*
* Enhances `Component.destroy()`.
* Stops every running component, clears sandbox events and destroys the instance.
*
* @fires Sandbox#beforeStop
* @fires Sandbox#stop
* @returns {Promise<void>}
* @example
* sandbox.destroy();
*/
	public async destroy(): Promise<void> {
this.emit('beforeStop');
await this.beforeDestroy();
this.removeListeners();
try {
if (this.$el) {
this.$el.removeAttribute(Sandbox.SB_DATA_ATTR);
}
await this.destroyRefs();
this.$active = false;
} catch (e) {
this.emit('error', e);
return Promise.reject(e);
}
this.$instances.clear();
this.emit('stop');
this.clear();
	}
}

sandbox.ts | fim_type: identifier_name

const nextSbUid = createSequence();
const nextChildUid = createSequence();
/**
* A sandbox can be used to initialize a set of components based on an element's innerHTML.
*
 * Let's say we have the following component:
*
* ```js
* class Counter extends Component {
* static root = '.Counter';
*
* // other stuff here ...
* }
* ```
*
* We can register the component inside a sandbox like this:
*
* ```js
* const sandbox = new Sandbox({
* components: [Counter],
* id: 'main', // optional
* });
*
* sandbox.mount('#main');
* ```
*
 * In this way the sandbox will attach itself to the element matching `#main` and will traverse its children,
 * attaching an instance of the Counter component to every `.Counter` element it finds.
 *
 * To prevent a component from being initialized (for example, when you want to initialize it at a later moment),
 * just add a `data-skip` attribute to its root element.
*
* @class
* @param {object} config
* @param {Component[]|[Component, object][]} [config.components] Array of components constructor or array with [ComponentConstructor, options]
* @param {HTMLElement|string} [config.root=document.body] Root element of the sandbox. Either a DOM element or a CSS selector
* @param {string} [config.id] ID of the sandbox
* @property {string} $id Sandbox internal id
* @property {HTMLElement} $el Sandbox root DOM element
* @property {Context} $ctx Internal [context](/packages/yuzu-application/api/context). Used to share data across child instances
* @property {object[]} $registry Registered components storage
* @property {Map} $instances Running instances storage
* @returns {Sandbox}
*/
export class Sandbox<S = {}> extends Component<S, ISandboxOptions> {
public static SB_DATA_ATTR = 'data-yuzu-sb';
public defaultOptions(): ISandboxOptions {
return {
components: [],
context: createContext(),
id: '',
root: document.body,
};
}
public $id: string;
public $ctx?: IContext;
public $registry: ISandboxRegistryEntry[] = [];
public $instances = new Map<
string | entrySelectorFn,
Component<any, any>[]
>();
/**
* Creates a sandbox instance.
*
* @constructor
*/
public constructor(options: Partial<ISandboxOptions> = {}) {
super(options);
const { components = [], id } = this.options;
this.$id = id || nextSbUid('_sbx-');
components.forEach((config) => {
if (!Array.isArray(config)) {
if (config.root) {
this.register({ component: config, selector: config.root });
}
if (process.env.NODE_ENV !== 'production') {
!config.root &&
this.$warn(
`Skipping component ${config.displayName ||
config.name} because static "root" selector is missing`,
);
}
} else {
const [component, params = {}] = config;
const selector = component.root || params.selector;
if (selector) {
this.register({ component, selector, ...params });
}
if (process.env.NODE_ENV !== 'production') {
!selector &&
this.$warn(
`Skipping component ${component.displayName ||
component.name} because a static "root" selector is missing and no "selector" param is passed-in`,
);
}
}
});
return this;
}
/**
* ```js
* register(params)
* ```
*
* Registers a new component into the sandbox. The registered components
 * will be traversed on `.mount()`, initializing every matching component.
*
* @param {object} params Every property other than `component` and `selector` will be used as component option
* @param {Component} params.component Component constructor
* @param {string} params.selector Child component root CSS selector
* @example
* sandbox.register({
* component: Counter,
* selector: '.Counter',
* theme: 'dark' // <-- instance options
* });
*/
public register<C extends Component<any, any>>(params: {
component: IComponentConstructable<C>;
selector: string | entrySelectorFn;
[key: string]: any;
}): void {
invariant(
Component.isComponent(params.component),
'Missing or invalid `component` property',
);
invariant(
typeof params.selector === 'string' ||
typeof params.selector === 'function',
'Missing `selector` property',
);
this.$registry.push(params);
}
/**
* ```js
* start([data])
* ```
*
* **DEPRECATED!** Use `sandbox.mount(root)` instead.
*
* Starts the sandbox with an optional context.
*
* The store will be available inside each component at `this.$context`.
*
* @deprecated
* @param {object} [data] Optional context data object to be injected into the child components.
* @fires Sandbox#beforeStart
* @fires Sandbox#start Events dispatched after all components are initialized
* @returns {Sandbox}
* @example
* sandbox.start();
*
* // with context data
* sandbox.start({ globalTheme: 'dark' });
*/
public start(data = {}): this {
Object.defineProperty(this, '$legacyStart', { value: true });
if (process.env.NODE_ENV !== 'production') {
this.$warn(`Sandbox.start is deprecated. Use the "mount" method instead`);
}
this.mount(this.options.root);
this.setup();
this.$ctx && this.$ctx.update(data);
this.discover();
return this;
}
/**
* ```js
* mount([el], [state])
* ```
*
* Enhances `Component.mount()` by firing the child components discovery logic.
 * By default it will use `document.body` as the mount element.
*
* @param {string|Element} el Component's root element
* @param {object|null} [state={}] Initial state
* @fires Sandbox#beforeStart
* @fires Sandbox#start Events dispatched after all components are initialized
* @returns {Sandbox}
*/
public mount(el: string | Element, state: Partial<S> | null = {}): this {
super.mount(el, state);
this.$el.setAttribute(Sandbox.SB_DATA_ATTR, '');
if (!this.hasOwnProperty('$legacyStart')) {
this.setup();
this.discover();
}
return this;
}
/**
 * Sets up the sandbox context passed in via the options.
*
* @ignore
*/
public setup(): void {
this.$ctx = this.options.context;
this.$ctx.inject(this);
}
/**
* Initializes the sandbox child components.
*
* @ignore
* @returns {Promise}
*/
public discover(): Promise<void> {
invariant(isElement(this.$el), '"this.$el" is not a DOM element');
this.emit('beforeStart');
const sbSelector = `[${Sandbox.SB_DATA_ATTR}]`;
const ret = this.$registry.map(
async ({ component: ComponentConstructor, selector, ...options }) => {
if (this.$instances.has(selector)) {
this.$warn(
`Component ${ComponentConstructor} already initialized on ${selector}`,
);
return;
}
const targets = this.resolveSelector(selector);
let instances: Promise<Component<any, any>>[] | undefined;
if (targets === true) {
instances = [this.createInstance(ComponentConstructor, options)];
} else if (Array.isArray(targets)) {
const { $el } = this;
instances = targets
.filter((el) => {
return (
isElement(el) &&
!el.dataset.skip &&
!el.closest('[data-skip]') &&
el.closest(sbSelector) === $el
);
})
.map((el) => {
return this.createInstance(ComponentConstructor, options, el);
});
}
if (instances) {
this.$instances.set(selector, await Promise.all(instances));
}
return true;
},
);
return Promise.all(ret).then(() => {
this.emit('start');
});
}
/**
* Resolves a configured component selector to a list of DOM nodes or a boolean (for detached components)
*
* @ignore
* @param {string|function} selector Selector string or function.
* @returns {HTMLElement[]|boolean}
*/
	public resolveSelector(
selector: string | entrySelectorFn,
): HTMLElement[] | boolean {
let targets = evaluate(selector, this);
if (typeof targets === 'string') {
targets = this.findNodes(targets) as HTMLElement[];
}
return targets;
}
/**
* Creates a component instance.
 * Reads inline component options from the passed-in root DOM element.
*
* @ignore
* @param {object} options instance options
* @param {HTMLElement} [el] Root element
* @returns {Component}
*/
public createInstance<C extends Component<any, any>>(
ComponentConstructor: IComponentConstructable<C>,
options: Record<string, any>,
el?: HTMLElement,
): Promise<C> {
const inlineOptions = el ? datasetParser(el) : {};
return this.setRef({
			id: nextChildUid(this.$id + '-c.'),

HexSnake.py | fim_type: identifier_body

colorBorder=(250,120,0)
colorApple=(250,0,0)
colorMouth=(250,200,200)
colorBonus=(40,0,240)
colorText=(240,240,10)
if True:
s=int(raw_input("What is field size? (more than 2) : "))
loop={'y':False,'n':True}[raw_input("Do borders kill? ('y' or 'n') : ")[0]] # read params
difficulty=int(raw_input("""
0 - "peaceful" : bonuses only make better
1 - "easy" : choose to eat a bonus would be well more often
2 - "medium" : bonuces are good or evil fifty fifty
3 - "hard" : basically, eat a bonus is of no benefit
4 - "insane" : the game is droven crazy
How difficult will the game be? (a digit) : """))
bonus_frequency=3
bonuses={ 0 : [EXTRAAPPLE,SHORTEN,REVERSE,EXTRASCORE],
1: [EXTRAAPPLE,SHORTEN,REVERSE,EXTRASCORE,EXTRABORDER,LENGTHEN,SHORTEN,EXTRAAPPLE],
2: [EXTRAAPPLE,SHORTEN,REVERSE,EXTRASCORE,EXTRABORDER,LENGTHEN,EXTRAKILLER],
3: [EXTRAAPPLE,SHORTEN,REVERSE,EXTRABORDER,LENGTHEN,EXTRAKILLER],
4: [EXTRAAPPLE,REVERSE,REVERSE,EXTRABORDER,LENGTHEN,EXTRAKILLER],
}[difficulty]
ZEROSPEED=35
speedlevel=s#15# float(raw_input("How fast does the snake crawl? \n"))
lose=False
pause = False
if True: # pygame init
pygame.init()
display_width = 800
display_height = int(display_width*0.85)
screen = pygame.display.set_mode((display_width, display_height))
pygame.display.set_caption('HexoSnake')
fontPause = pygame.font.SysFont("monospace", 40)
fontRules = pygame.font.SysFont("monospace", 13)
textGO = fontPause.render("GAME OVER!", 1, colorText)
textPause = fontPause.render("Paused...", 1, colorText)
textScore = fontRules.render("Score : ",1,colorText)
textSpeed = fontRules.render("Speed : ",1,colorText)
Rules = ["Controls:",
"q / e : left/right ",
"a / d : left/right x2 ",
"w / s : speed up/down ",
"p : pause",
"esc : quit"]
textRules=[fontRules.render(rule, 1, colorText)for rule in Rules]
class Snake:
field_size=1
field_dimension=1
field = zeros((1,1))
display_width_step=display_width
display_height_step=display_height
def __init__(self,x,y,sc,d):
self.score=sc
self.body=[(x,y)]*sc
self.cdir=d
def initfield(self,s):
n=2*s-1
self.field_size=s
self.field_dimension=n
self.field = zeros((n,n))
self.display_width_step=display_width/n
self.display_height_step=display_height/n
def out(self,x,y):
s=self.field_size
n=self.field_dimension
if x<0 or y<0: return True
if x>=n or y>=n: return True
if x>=s and y<=x-s : return True
if y>=s and x<=y-s : return True
if self.field[x][y]==BORDER : return True
return False
def setobj(self,a,t):
while a>0:
i=int(self.field_dimension*rnd())
j=int(self.field_dimension*rnd())
if self.field[i][j]==EMPTY and not self.out(i,j):
a=a-1
self.field[i][j]=t
def display_crds(self,x,y):
n=self.field_dimension
return (1+display_width/(4*n)+display_width/4
+x*display_width/(n)-display_width*y/2/(n),
1+display_height/(2*n)+y*display_height/(n))
def drawsymbol(self,i,j):
l=self.field[i][j]
crds=self.display_crds(i,j)
dw=self.display_width_step
dh=self.display_height_step
if l==TAIL:
pygame.draw.circle(screen, colorSnake, crds, dh/2, 0)
if l==BODY:
pygame.draw.circle(screen, colorSnake, crds, dw/2, 0)
if l==HEAD:
x,y=further(i,j,self.cdir)
x,y=self.display_crds(x,y)
mcrds=((x+2*crds[0])/3,(y+2*crds[1])/3)
pygame.draw.circle(screen, colorSnake, crds, dw/2, 0)
pygame.draw.circle(screen, colorMouth, mcrds, dw/6, 0)
if l==APPLE :
pygame.draw.circle(screen, colorApple, crds, dh/2, 0)
if l==EMPTY:
return
if l==KILLER:
cx,cy=crds
pygame.draw.line(screen,colorBorder,(cx-dh/2,cy),(cx+dh/2,cy),2)
pygame.draw.line(screen,colorBorder,(cx,cy-dh/2),(cx,cy+dh/2),2)
pygame.draw.line(screen,colorBorder,(cx-dh/3,cy-dh/3),(cx+dh/3,cy+dh/3),2)
pygame.draw.line(screen,colorBorder,(cx+dh/3,cy-dh/3),(cx-dh/3,cy+dh/3),2)
if l==BORDER:
pygame.draw.circle(screen, colorBorder, crds, dh/2, 0)
if l==EXTRABORDER or l==EXTRAAPPLE :
cx,cy=crds
pygame.draw.line(screen,colorBonus,(cx-dh/2,cy),(cx+dh/2,cy),2)
pygame.draw.line(screen,colorBonus,(cx,cy-dh/2),(cx,cy+dh/2),2)
if l==SHORTEN or l==LENGTHEN :
cx,cy=crds
pygame.draw.line(screen,colorBonus,(cx-dh/2,cy),(cx+dh/2,cy),2)
pygame.draw.line(screen,colorBonus,(cx,cy-dh/2),(cx,cy+dh/2),2)
pygame.draw.line(screen,colorBonus,(cx-dh/3,cy-dh/3),(cx+dh/3,cy+dh/3),2)
pygame.draw.line(screen,colorBonus,(cx+dh/3,cy-dh/3),(cx-dh/3,cy+dh/3),2)
if l==REVERSE or l==EXTRAKILLER :
cx,cy=crds
pygame.draw.line(screen,colorBonus,(cx-dh/3,cy-dh/3),(cx+dh/3,cy+dh/3),2)
pygame.draw.line(screen,colorBonus,(cx+dh/3,cy-dh/3),(cx-dh/3,cy+dh/3),2)
if l==BONUS or l==EXTRASCORE:
pygame.draw.circle(screen, colorBonus, crds, dh/2, 0)
return
def drawfield(self):
n=self.field_dimension
s=self.field_size
ds=self.drawsymbol
screen.fill((0,0,0))
pygame.draw.polygon(screen, colorGrass, [
(0,display_height/2),
(display_width/4.,0),
(display_width*3./4.,0),
(display_width,display_height/2),
(display_width*3./4.,display_height),
(display_width/4.,display_height)], 0)
for j in range(n): ds(s-1,j)
for i in range(s-1):
for j in range(s+i): ds(i,j)
for j in range(i+1,n): ds(i+s,j)
def next(self,x,y,d):
		rx, ry= further(x,y,d)
		if self.out(rx,ry):
			while (not self.out(x,y) ) :
				x,y= further(x,y,(d+3)%6)
			rx, ry= further(x,y,d)
			if not loop:
				self.field[rx][ry]=KILLER
		return rx,ry
def crawl(self):
f=self.field
x,y=self.body[-1]
if f[x][y]==BODY: f[x][y],t=TAIL,[(x,y)]
else : f[x][y],t=EMPTY,[]
x,y=self.body[0]
if f[x][y]!=BODY:
f[x][y]=TAIL
x,y=self.next(x,y,self.cdir)
HexSnake.py | fim_type: conditional_block

colorBorder=(250,120,0)
colorApple=(250,0,0)
colorMouth=(250,200,200)
colorBonus=(40,0,240)
colorText=(240,240,10)
if True:
s=int(raw_input("What is field size? (more than 2) : "))
loop={'y':False,'n':True}[raw_input("Do borders kill? ('y' or 'n') : ")[0]] # read params
difficulty=int(raw_input("""
0 - "peaceful" : bonuses only make better
1 - "easy" : choose to eat a bonus would be well more often
2 - "medium" : bonuces are good or evil fifty fifty
3 - "hard" : basically, eat a bonus is of no benefit
4 - "insane" : the game is droven crazy
How difficult will the game be? (a digit) : """))
bonus_frequency=3
bonuses={ 0 : [EXTRAAPPLE,SHORTEN,REVERSE,EXTRASCORE],
1: [EXTRAAPPLE,SHORTEN,REVERSE,EXTRASCORE,EXTRABORDER,LENGTHEN,SHORTEN,EXTRAAPPLE],
2: [EXTRAAPPLE,SHORTEN,REVERSE,EXTRASCORE,EXTRABORDER,LENGTHEN,EXTRAKILLER],
3: [EXTRAAPPLE,SHORTEN,REVERSE,EXTRABORDER,LENGTHEN,EXTRAKILLER],
4: [EXTRAAPPLE,REVERSE,REVERSE,EXTRABORDER,LENGTHEN,EXTRAKILLER],
}[difficulty]
ZEROSPEED=35
speedlevel=s#15# float(raw_input("How fast does the snake crawl? \n"))
lose=False
pause = False
if True: # pygame init
pygame.init()
display_width = 800
display_height = int(display_width*0.85)
screen = pygame.display.set_mode((display_width, display_height))
pygame.display.set_caption('HexoSnake')
fontPause = pygame.font.SysFont("monospace", 40)
fontRules = pygame.font.SysFont("monospace", 13)
textGO = fontPause.render("GAME OVER!", 1, colorText)
textPause = fontPause.render("Paused...", 1, colorText)
textScore = fontRules.render("Score : ",1,colorText)
textSpeed = fontRules.render("Speed : ",1,colorText)
Rules = ["Controls:",
"q / e : left/right ",
"a / d : left/right x2 ",
"w / s : speed up/down ",
"p : pause",
"esc : quit"]
textRules=[fontRules.render(rule, 1, colorText)for rule in Rules]
class Snake:
field_size=1
field_dimension=1
field = zeros((1,1))
display_width_step=display_width
display_height_step=display_height
def __init__(self,x,y,sc,d):
self.score=sc
self.body=[(x,y)]*sc
self.cdir=d
def initfield(self,s):
n=2*s-1
self.field_size=s
self.field_dimension=n
self.field = zeros((n,n))
self.display_width_step=display_width/n
self.display_height_step=display_height/n
def out(self,x,y):
s=self.field_size
n=self.field_dimension
if x<0 or y<0: return True
if x>=n or y>=n: return True
if x>=s and y<=x-s : return True
if y>=s and x<=y-s : return True
if self.field[x][y]==BORDER : return True
return False
def setobj(self,a,t):
while a>0:
i=int(self.field_dimension*rnd())
j=int(self.field_dimension*rnd())
if self.field[i][j]==EMPTY and not self.out(i,j):
a=a-1
self.field[i][j]=t
def display_crds(self,x,y):
n=self.field_dimension
return (1+display_width/(4*n)+display_width/4
+x*display_width/(n)-display_width*y/2/(n),
1+display_height/(2*n)+y*display_height/(n))
def drawsymbol(self,i,j):
l=self.field[i][j]
crds=self.display_crds(i,j)
dw=self.display_width_step
dh=self.display_height_step
if l==TAIL:
pygame.draw.circle(screen, colorSnake, crds, dh/2, 0)
if l==BODY:
pygame.draw.circle(screen, colorSnake, crds, dw/2, 0)
if l==HEAD:
x,y=further(i,j,self.cdir)
x,y=self.display_crds(x,y)
mcrds=((x+2*crds[0])/3,(y+2*crds[1])/3)
pygame.draw.circle(screen, colorSnake, crds, dw/2, 0)
pygame.draw.circle(screen, colorMouth, mcrds, dw/6, 0)
if l==APPLE :
pygame.draw.circle(screen, colorApple, crds, dh/2, 0)
if l==EMPTY:
return
if l==KILLER:
cx,cy=crds
pygame.draw.line(screen,colorBorder,(cx-dh/2,cy),(cx+dh/2,cy),2)
pygame.draw.line(screen,colorBorder,(cx,cy-dh/2),(cx,cy+dh/2),2)
pygame.draw.line(screen,colorBorder,(cx-dh/3,cy-dh/3),(cx+dh/3,cy+dh/3),2)
pygame.draw.line(screen,colorBorder,(cx+dh/3,cy-dh/3),(cx-dh/3,cy+dh/3),2)
if l==BORDER:
pygame.draw.circle(screen, colorBorder, crds, dh/2, 0)
if l==EXTRABORDER or l==EXTRAAPPLE :
cx,cy=crds
pygame.draw.line(screen,colorBonus,(cx-dh/2,cy),(cx+dh/2,cy),2)
pygame.draw.line(screen,colorBonus,(cx,cy-dh/2),(cx,cy+dh/2),2)
if l==SHORTEN or l==LENGTHEN :
cx,cy=crds
pygame.draw.line(screen,colorBonus,(cx-dh/2,cy),(cx+dh/2,cy),2)
pygame.draw.line(screen,colorBonus,(cx,cy-dh/2),(cx,cy+dh/2),2)
pygame.draw.line(screen,colorBonus,(cx-dh/3,cy-dh/3),(cx+dh/3,cy+dh/3),2)
pygame.draw.line(screen,colorBonus,(cx+dh/3,cy-dh/3),(cx-dh/3,cy+dh/3),2)
if l==REVERSE or l==EXTRAKILLER :
			cx,cy=crds
			pygame.draw.line(screen,colorBonus,(cx-dh/3,cy-dh/3),(cx+dh/3,cy+dh/3),2)
			pygame.draw.line(screen,colorBonus,(cx+dh/3,cy-dh/3),(cx-dh/3,cy+dh/3),2)
if l==BONUS or l==EXTRASCORE:
pygame.draw.circle(screen, colorBonus, crds, dh/2, 0)
return
def drawfield(self):
n=self.field_dimension
s=self.field_size
ds=self.drawsymbol
screen.fill((0,0,0))
pygame.draw.polygon(screen, colorGrass, [
(0,display_height/2),
(display_width/4.,0),
(display_width*3./4.,0),
(display_width,display_height/2),
(display_width*3./4.,display_height),
(display_width/4.,display_height)], 0)
for j in range(n): ds(s-1,j)
for i in range(s-1):
for j in range(s+i): ds(i,j)
for j in range(i+1,n): ds(i+s,j)
def next(self,x,y,d):
rx, ry= further(x,y,d)
if self.out(rx,ry):
while (not self.out(x,y) ) :
x,y= further(x,y,(d+3)%6)
rx, ry= further(x,y,d)
if not loop:
self.field[rx][ry]=KILLER
return rx,ry
def crawl(self):
f=self.field
x,y=self.body[-1]
if f[x][y]==BODY: f[x][y],t=TAIL,[(x,y)]
else : f[x][y],t=EMPTY,[]
x,y=self.body[0]
if f[x][y]!=BODY:
f[x][y]=TAIL
x,y=self.next(x,y,self.cdir)
HexSnake.py | fim_type: random_line_split

colorBorder=(250,120,0)
colorApple=(250,0,0)
colorMouth=(250,200,200)
colorBonus=(40,0,240)
colorText=(240,240,10)
if True:
s=int(raw_input("What is field size? (more than 2) : "))
loop={'y':False,'n':True}[raw_input("Do borders kill? ('y' or 'n') : ")[0]] # read params
difficulty=int(raw_input("""
0 - "peaceful" : bonuses only make better
1 - "easy" : choose to eat a bonus would be well more often
2 - "medium" : bonuces are good or evil fifty fifty
3 - "hard" : basically, eat a bonus is of no benefit
4 - "insane" : the game is droven crazy
How difficult will the game be? (a digit) : """))
bonus_frequency=3
bonuses={ 0 : [EXTRAAPPLE,SHORTEN,REVERSE,EXTRASCORE],
1: [EXTRAAPPLE,SHORTEN,REVERSE,EXTRASCORE,EXTRABORDER,LENGTHEN,SHORTEN,EXTRAAPPLE],
2: [EXTRAAPPLE,SHORTEN,REVERSE,EXTRASCORE,EXTRABORDER,LENGTHEN,EXTRAKILLER],
3: [EXTRAAPPLE,SHORTEN,REVERSE,EXTRABORDER,LENGTHEN,EXTRAKILLER],
4: [EXTRAAPPLE,REVERSE,REVERSE,EXTRABORDER,LENGTHEN,EXTRAKILLER],
}[difficulty]
ZEROSPEED=35
speedlevel=s#15# float(raw_input("How fast does the snake crawl? \n"))
lose=False
pause = False
if True: # pygame init
pygame.init()
display_width = 800
display_height = int(display_width*0.85)
screen = pygame.display.set_mode((display_width, display_height))
pygame.display.set_caption('HexoSnake')
fontPause = pygame.font.SysFont("monospace", 40)
fontRules = pygame.font.SysFont("monospace", 13)
textGO = fontPause.render("GAME OVER!", 1, colorText)
textPause = fontPause.render("Paused...", 1, colorText)
textScore = fontRules.render("Score : ",1,colorText)
textSpeed = fontRules.render("Speed : ",1,colorText)
Rules = ["Controls:",
"q / e : left/right ",
"a / d : left/right x2 ",
"w / s : speed up/down ",
"p : pause",
"esc : quit"]
textRules=[fontRules.render(rule, 1, colorText)for rule in Rules]
class Snake:
field_size=1
field_dimension=1
field = zeros((1,1))
display_width_step=display_width
display_height_step=display_height
def __init__(self,x,y,sc,d):
self.score=sc
self.body=[(x,y)]*sc
self.cdir=d
def initfield(self,s):
n=2*s-1
self.field_size=s
self.field_dimension=n
self.field = zeros((n,n))
self.display_width_step=display_width/n
self.display_height_step=display_height/n
def out(self,x,y):
s=self.field_size
n=self.field_dimension
if x<0 or y<0: return True
if x>=n or y>=n: return True
if x>=s and y<=x-s : return True
if y>=s and x<=y-s : return True
if self.field[x][y]==BORDER : return True
return False
def setobj(self,a,t):
while a>0:
i=int(self.field_dimension*rnd())
j=int(self.field_dimension*rnd())
if self.field[i][j]==EMPTY and not self.out(i,j):
a=a-1
self.field[i][j]=t
def display_crds(self,x,y):
n=self.field_dimension
return (1+display_width/(4*n)+display_width/4
+x*display_width/(n)-display_width*y/2/(n),
1+display_height/(2*n)+y*display_height/(n))
def drawsymbol(self,i,j):
l=self.field[i][j]
crds=self.display_crds(i,j)
		dw=self.display_width_step
		dh=self.display_height_step
if l==TAIL:
pygame.draw.circle(screen, colorSnake, crds, dh/2, 0)
if l==BODY:
pygame.draw.circle(screen, colorSnake, crds, dw/2, 0)
if l==HEAD:
x,y=further(i,j,self.cdir)
x,y=self.display_crds(x,y)
mcrds=((x+2*crds[0])/3,(y+2*crds[1])/3)
pygame.draw.circle(screen, colorSnake, crds, dw/2, 0)
pygame.draw.circle(screen, colorMouth, mcrds, dw/6, 0)
if l==APPLE :
pygame.draw.circle(screen, colorApple, crds, dh/2, 0)
if l==EMPTY:
return
if l==KILLER:
cx,cy=crds
pygame.draw.line(screen,colorBorder,(cx-dh/2,cy),(cx+dh/2,cy),2)
pygame.draw.line(screen,colorBorder,(cx,cy-dh/2),(cx,cy+dh/2),2)
pygame.draw.line(screen,colorBorder,(cx-dh/3,cy-dh/3),(cx+dh/3,cy+dh/3),2)
pygame.draw.line(screen,colorBorder,(cx+dh/3,cy-dh/3),(cx-dh/3,cy+dh/3),2)
if l==BORDER:
pygame.draw.circle(screen, colorBorder, crds, dh/2, 0)
if l==EXTRABORDER or l==EXTRAAPPLE :
cx,cy=crds
pygame.draw.line(screen,colorBonus,(cx-dh/2,cy),(cx+dh/2,cy),2)
pygame.draw.line(screen,colorBonus,(cx,cy-dh/2),(cx,cy+dh/2),2)
if l==SHORTEN or l==LENGTHEN :
cx,cy=crds
pygame.draw.line(screen,colorBonus,(cx-dh/2,cy),(cx+dh/2,cy),2)
pygame.draw.line(screen,colorBonus,(cx,cy-dh/2),(cx,cy+dh/2),2)
pygame.draw.line(screen,colorBonus,(cx-dh/3,cy-dh/3),(cx+dh/3,cy+dh/3),2)
pygame.draw.line(screen,colorBonus,(cx+dh/3,cy-dh/3),(cx-dh/3,cy+dh/3),2)
if l==REVERSE or l==EXTRAKILLER :
cx,cy=crds
pygame.draw.line(screen,colorBonus,(cx-dh/3,cy-dh/3),(cx+dh/3,cy+dh/3),2)
pygame.draw.line(screen,colorBonus,(cx+dh/3,cy-dh/3),(cx-dh/3,cy+dh/3),2)
if l==BONUS or l==EXTRASCORE:
pygame.draw.circle(screen, colorBonus, crds, dh/2, 0)
return
def drawfield(self):
n=self.field_dimension
s=self.field_size
ds=self.drawsymbol
screen.fill((0,0,0))
pygame.draw.polygon(screen, colorGrass, [
(0,display_height/2),
(display_width/4.,0),
(display_width*3./4.,0),
(display_width,display_height/2),
(display_width*3./4.,display_height),
(display_width/4.,display_height)], 0)
for j in range(n): ds(s-1,j)
for i in range(s-1):
for j in range(s+i): ds(i,j)
for j in range(i+1,n): ds(i+s,j)
def next(self,x,y,d):
rx, ry= further(x,y,d)
if self.out(rx,ry):
while (not self.out(x,y) ) :
x,y= further(x,y,(d+3)%6)
rx, ry= further(x,y,d)
if not loop:
self.field[rx][ry]=KILLER
return rx,ry
def crawl(self):
f=self.field
x,y=self.body[-1]
if f[x][y]==BODY: f[x][y],t=TAIL,[(x,y)]
else : f[x][y],t=EMPTY,[]
x,y=self.body[0]
if f[x][y]!=BODY:
f[x][y]=TAIL
x,y=self.next(x,y,self.cdir)
HexSnake.py | fim_type: identifier_name

def dir(x,y,i,j):
d=3 if abs(x-i)>1 or abs(y-j)>1 else 0
v=(x-i)/abs(x-i) if x!=i else 0
h=(y-j)/abs(y-j) if y!=j else 0
return (d+{
(0,-1):0,
(-1,-1):1,
(-1,0):2,
(0,1):3,
(1,1):4,
(1,0):5
}[(v,h)])%6
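# Note: `further(x, y, d)` is used throughout but defined outside this excerpt.
# Judging from the lookup table above, a plausible (hypothetical) counterpart is:
# def further(x, y, d):
#     v, h = [(0, -1), (-1, -1), (-1, 0), (0, 1), (1, 1), (1, 0)][d]
#     return x + v, y + h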
if True: # objects
HEAD=-3
BODY=-2
TAIL=-1
APPLE=1
BORDER=-10
KILLER=-20
EXTRAAPPLE=11
EXTRABORDER=12
LENGTHEN=13
SHORTEN=14
REVERSE=15
EXTRAKILLER=16
EXTRASCORE=17
BONUS=10
EMPTY=0
if True: # colors
colorSnake=(2,250,200)
colorGrass=(0,100,20)
colorBorder=(250,120,0)
colorApple=(250,0,0)
colorMouth=(250,200,200)
colorBonus=(40,0,240)
colorText=(240,240,10)
if True:
s=int(raw_input("What is field size? (more than 2) : "))
loop={'y':False,'n':True}[raw_input("Do borders kill? ('y' or 'n') : ")[0]] # read params
difficulty=int(raw_input("""
0 - "peaceful" : bonuses only make better
1 - "easy" : choose to eat a bonus would be well more often
2 - "medium" : bonuces are good or evil fifty fifty
3 - "hard" : basically, eat a bonus is of no benefit
4 - "insane" : the game is droven crazy
How difficult will the game be? (a digit) : """))
bonus_frequency=3
bonuses={ 0 : [EXTRAAPPLE,SHORTEN,REVERSE,EXTRASCORE],
1: [EXTRAAPPLE,SHORTEN,REVERSE,EXTRASCORE,EXTRABORDER,LENGTHEN,SHORTEN,EXTRAAPPLE],
2: [EXTRAAPPLE,SHORTEN,REVERSE,EXTRASCORE,EXTRABORDER,LENGTHEN,EXTRAKILLER],
3: [EXTRAAPPLE,SHORTEN,REVERSE,EXTRABORDER,LENGTHEN,EXTRAKILLER],
4: [EXTRAAPPLE,REVERSE,REVERSE,EXTRABORDER,LENGTHEN,EXTRAKILLER],
}[difficulty]
ZEROSPEED=35
speedlevel=s#15# float(raw_input("How fast does the snake crawl? \n"))
lose=False
pause = False
if True: # pygame init
pygame.init()
display_width = 800
display_height = int(display_width*0.85)
screen = pygame.display.set_mode((display_width, display_height))
pygame.display.set_caption('HexoSnake')
fontPause = pygame.font.SysFont("monospace", 40)
fontRules = pygame.font.SysFont("monospace", 13)
textGO = fontPause.render("GAME OVER!", 1, colorText)
textPause = fontPause.render("Paused...", 1, colorText)
textScore = fontRules.render("Score : ",1,colorText)
textSpeed = fontRules.render("Speed : ",1,colorText)
Rules = ["Controls:",
"q / e : left/right ",
"a / d : left/right x2 ",
"w / s : speed up/down ",
"p : pause",
"esc : quit"]
textRules=[fontRules.render(rule, 1, colorText)for rule in Rules]
class Snake:
field_size=1
field_dimension=1
field = zeros((1,1))
display_width_step=display_width
display_height_step=display_height
def __init__(self,x,y,sc,d):
self.score=sc
self.body=[(x,y)]*sc
self.cdir=d
def initfield(self,s):
n=2*s-1
self.field_size=s
self.field_dimension=n
self.field = zeros((n,n))
self.display_width_step=display_width/n
self.display_height_step=display_height/n
def out(self,x,y):
s=self.field_size
n=self.field_dimension
if x<0 or y<0: return True
if x>=n or y>=n: return True
if x>=s and y<=x-s : return True
if y>=s and x<=y-s : return True
if self.field[x][y]==BORDER : return True
return False
def setobj(self,a,t):
while a>0:
i=int(self.field_dimension*rnd())
j=int(self.field_dimension*rnd())
if self.field[i][j]==EMPTY and not self.out(i,j):
a=a-1
self.field[i][j]=t
def display_crds(self,x,y):
n=self.field_dimension
return (1+display_width/(4*n)+display_width/4
+x*display_width/(n)-display_width*y/2/(n),
1+display_height/(2*n)+y*display_height/(n))
def drawsymbol(self,i,j):
l=self.field[i][j]
crds=self.display_crds(i,j)
dw=self.display_width_step
dh=self.display_height_step
if l==TAIL:
pygame.draw.circle(screen, colorSnake, crds, dh/2, 0)
if l==BODY:
pygame.draw.circle(screen, colorSnake, crds, dw/2, 0)
if l==HEAD:
x,y=further(i,j,self.cdir)
x,y=self.display_crds(x,y)
mcrds=((x+2*crds[0])/3,(y+2*crds[1])/3)
pygame.draw.circle(screen, colorSnake, crds, dw/2, 0)
pygame.draw.circle(screen, colorMouth, mcrds, dw/6, 0)
if l==APPLE :
pygame.draw.circle(screen, colorApple, crds, dh/2, 0)
if l==EMPTY:
return
if l==KILLER:
cx,cy=crds
pygame.draw.line(screen,colorBorder,(cx-dh/2,cy),(cx+dh/2,cy),2)
pygame.draw.line(screen,colorBorder,(cx,cy-dh/2),(cx,cy+dh/2),2)
pygame.draw.line(screen,colorBorder,(cx-dh/3,cy-dh/3),(cx+dh/3,cy+dh/3),2)
pygame.draw.line(screen,colorBorder,(cx+dh/3,cy-dh/3),(cx-dh/3,cy+dh/3),2)
if l==BORDER:
pygame.draw.circle(screen, colorBorder, crds, dh/2, 0)
if l==EXTRABORDER or l==EXTRAAPPLE :
cx,cy=crds
pygame.draw.line(screen,colorBonus,(cx-dh/2,cy),(cx+dh/2,cy),2)
pygame.draw.line(screen,colorBonus,(cx,cy-dh/2),(cx,cy+dh/2),2)
if l==SHORTEN or l==LENGTHEN :
cx,cy=crds
pygame.draw.line(screen,colorBonus,(cx-dh/2,cy),(cx+dh/2,cy),2)
pygame.draw.line(screen,colorBonus,(cx,cy-dh/2),(cx,cy+dh/2),2)
pygame.draw.line(screen,colorBonus,(cx-dh/3,cy-dh/3),(cx+dh/3,cy+dh/3),2)
pygame.draw.line(screen,colorBonus,(cx+dh/3,cy-dh/3),(cx-dh/3,cy+dh/3),2)
if l==REVERSE or l==EXTRAKILLER :
cx,cy=crds
pygame.draw.line(screen,colorBonus,(cx-dh/3,cy-dh/3),(cx+dh/3,cy+dh/3),2)
pygame.draw.line(screen,colorBonus,(cx+dh/3,cy-dh/3),(cx-dh/3,cy+dh/3),2)
if l==BONUS or l==EXTRASCORE:
pygame.draw.circle(screen, colorBonus, crds, dh/2, 0)
return
def drawfield(self):
n=self.field_dimension
s=self.field_size
ds=self.drawsymbol
screen.fill((0,0,0))
pygame.draw.polygon(screen, colorGrass, [
			(0,display_height/2

helpers.ts

, functions: []},
};
static functionRegex: any = null;
static relationMethods = ['has', 'orHas', 'whereHas', 'orWhereHas', 'whereDoesntHave', 'orWhereDoesntHave',
'doesntHave', 'orDoesntHave', 'hasMorph', 'orHasMorph', 'doesntHaveMorph', 'orDoesntHaveMorph',
'whereHasMorph', 'orWhereHasMorph', 'whereDoesntHaveMorph', 'orWhereDoesntHaveMorph',
'withAggregate', 'withCount', 'withMax', 'withMin', 'withSum', 'withAvg'];
/**
* Create full path from project file name
*
* @param path
* @param forCode
	 * @returns {string}
*/
static projectPath(path:string, forCode: boolean = false) : string {
if (path[0] !== '/') {
path = '/' + path;
}
let basePath = vscode.workspace.getConfiguration("LaravelExtraIntellisense").get<string>('basePath');
if (forCode === false && basePath && basePath.length > 0) {
if (basePath.startsWith('.') && vscode.workspace.workspaceFolders && vscode.workspace.workspaceFolders.length > 0) {
basePath = resolve(vscode.workspace.workspaceFolders[0].uri.fsPath, basePath);
}
basePath = basePath.replace(/[\/\\]$/, ""); | }
let basePathForCode = vscode.workspace.getConfiguration("LaravelExtraIntellisense").get<string>('basePathForCode');
if (forCode && basePathForCode && basePathForCode.length > 0) {
if (basePathForCode.startsWith('.') && vscode.workspace.workspaceFolders && vscode.workspace.workspaceFolders.length > 0) {
basePathForCode = resolve(vscode.workspace.workspaceFolders[0].uri.fsPath, basePathForCode);
}
basePathForCode = basePathForCode.replace(/[\/\\]$/, "");
return basePathForCode + path;
}
if (vscode.workspace.workspaceFolders instanceof Array && vscode.workspace.workspaceFolders.length > 0) {
for (let workspaceFolder of vscode.workspace.workspaceFolders) {
if (fs.existsSync(workspaceFolder.uri.fsPath + "/artisan")) {
return workspaceFolder.uri.fsPath + path;
}
}
}
return "";
}
static arrayUnique(value:any, index:any, self:Array<any>) {
return self.indexOf(value) === index;
}
/**
* Boot laravel and run simple php code.
*
* @param code
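	 * @example
	 * // hypothetical snippet for illustration; any PHP code that echoes its result works:
	 * // Helpers.runLaravel("echo json_encode(config('app.name'));").then(out => console.log(out));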
*/
static runLaravel(code: string) : Promise<string> {
code = code.replace(/(?:\r\n|\r|\n)/g, ' ');
if (fs.existsSync(Helpers.projectPath("vendor/autoload.php")) && fs.existsSync(Helpers.projectPath("bootstrap/app.php"))) {
var command =
"define('LARAVEL_START', microtime(true));" +
"require_once '" + Helpers.projectPath("vendor/autoload.php", true) + "';" +
"$app = require_once '" + Helpers.projectPath("bootstrap/app.php", true) + "';" +
"class VscodeLaravelExtraIntellisenseProvider extends \\Illuminate\\Support\\ServiceProvider" +
"{" +
" public function register() {}" +
" public function boot()" +
" {" +
" if (method_exists($this->app['log'], 'setHandlers')) {" +
" $this->app['log']->setHandlers([new \\Monolog\\Handler\\NullHandler()]);" +
" }" +
" }" +
"}" +
"$app->register(new VscodeLaravelExtraIntellisenseProvider($app));" +
"$kernel = $app->make(Illuminate\\Contracts\\Console\\Kernel::class);" +
"$status = $kernel->handle(" +
"$input = new Symfony\\Component\\Console\\Input\\ArgvInput," +
"new Symfony\\Component\\Console\\Output\\ConsoleOutput" +
");" +
"echo '___VSCODE_LARAVEL_EXTRA_INSTELLISENSE_OUTPUT___';" +
code +
"echo '___VSCODE_LARAVEL_EXTRA_INSTELLISENSE_END_OUTPUT___';";
var self = this;
return new Promise(function (resolve, error) {
self.runPhp(command)
.then(function (result: string) {
var out : string | null | RegExpExecArray = result;
out = /___VSCODE_LARAVEL_EXTRA_INSTELLISENSE_OUTPUT___(.*)___VSCODE_LARAVEL_EXTRA_INSTELLISENSE_END_OUTPUT___/g.exec(out);
if (out) {
resolve(out[1]);
} else {
error("PARSE ERROR: " + result);
}
})
.catch(function (e : Error) {
error(e);
});
});
}
return new Promise((resolve, error) => resolve(""));
}
/**
* run simple php code.
*
* @param code
*/
static async runPhp(code: string) : Promise<string> {
code = code.replace(/\"/g, "\\\"");
if (['linux', 'openbsd', 'sunos', 'darwin'].some(unixPlatforms => os.platform().includes(unixPlatforms))) {
code = code.replace(/\$/g, "\\$");
code = code.replace(/\\\\'/g, '\\\\\\\\\'');
code = code.replace(/\\\\"/g, '\\\\\\\\\"');
}
let command = vscode.workspace.getConfiguration("LaravelExtraIntellisense").get<string>('phpCommand') ?? "php -r \"{code}\"";
command = command.replace("{code}", code);
let out = new Promise<string>(function (resolve, error) {
cp.exec(command,
{ cwd: vscode.workspace.workspaceFolders && vscode.workspace.workspaceFolders.length > 0 ? vscode.workspace.workspaceFolders[0].uri.fsPath : undefined },
function (err, stdout, stderr) {
if (stdout.length > 0) {
resolve(stdout);
} else {
if (Helpers.outputChannel !== null) {
Helpers.outputChannel.appendLine("Laravel extra intellisense Error: " + stderr);
}
error(stderr);
}
}
);
});
return out;
}
/**
* Parse php code with 'php-parser' package.
* @param code
*/
static parsePhp(code: string): any {
if (! Helpers.phpParser) {
var PhpEngine = require('php-parser');
Helpers.phpParser = new PhpEngine({
parser: {
extractDoc: true,
php7: true
},
ast: {
withPositions: true
}
});
}
try {
return Helpers.phpParser.parseCode(code);
} catch (exception) {
return null;
}
}
/**
	 * Convert a PHP variable definition to a JavaScript variable.
* @param code
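	 * @example
	 * // sketch (simple scalar literals only): Helpers.evalPhp("'hello'") returns 'hello'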
*/
static evalPhp(code: string): any {
var out = Helpers.parsePhp('<?php ' + code + ';');
if (out && typeof out.children[0] !== 'undefined') {
return out.children[0].expression.value;
}
return undefined;
}
/**
* Parse php function call.
*
* @param text
* @param position
*/
static parseFunction(text: string, position: number, level: number = 0): any {
var out:any = null;
var classes = [];
for(let i in Helpers.tags) {
for (let j in Helpers.tags[i].classes) {
classes.push(Helpers.tags[i].classes[j]);
}
}
var regexPattern = "(((" + classes.join('|') + ")::)?([@A-Za-z0-9_]+))((\\()((?:[^)(]|\\((?:[^)(]|\\([^)(]*\\))*\\))*)(\\)|$))";
var functionRegex = new RegExp(regexPattern, "g");
var paramsRegex = /((\s*\,\s*)?)(\[[\s\S]*(\]|$)|array\[\s\S]*(\)|$)|(\"((\\\")|[^\"])*(\"|$))|(\'((\\\')|[^\'])*(\'|$)))/g;
var inlineFunctionMatch = /\((([\s\S]*\,)?\s*function\s*\(.*\)\s*\{)([\S\s]*)\}/g;
text = text.substr(Math.max(0, position - 200), 400);
position -= Math.max(0, position - 200);
var match = null;
var match2 = null;
if (Helpers.cachedParseFunction !== null && Helpers.cachedParseFunction.text === text && position === Helpers.cachedParseFunction.position) {
out = Helpers.cachedParseFunction.out;
} else if (level < 6) {
while ((match = functionRegex.exec(text)) !== null) {
if (position >= match.index && match[0] && position < match.index + match[0].length) {
if ((match2 = inlineFunctionMatch.exec(match[0])) !== null && typeof match2[3] === 'string' && typeof match[1] | return basePath + path; | random_line_split |
helpers.ts

functions: []},
};
static functionRegex: any = null;
static relationMethods = ['has', 'orHas', 'whereHas', 'orWhereHas', 'whereDoesntHave', 'orWhereDoesntHave',
'doesntHave', 'orDoesntHave', 'hasMorph', 'orHasMorph', 'doesntHaveMorph', 'orDoesntHaveMorph',
'whereHasMorph', 'orWhereHasMorph', 'whereDoesntHaveMorph', 'orWhereDoesntHaveMorph',
'withAggregate', 'withCount', 'withMax', 'withMin', 'withSum', 'withAvg'];
/**
* Create full path from project file name
*
* @param path
* @param forCode
	 * @returns {string}
*/
	static projectPath(path:string, forCode: boolean = false) : string {
if (path[0] !== '/') {
path = '/' + path;
}
let basePath = vscode.workspace.getConfiguration("LaravelExtraIntellisense").get<string>('basePath');
if (forCode === false && basePath && basePath.length > 0) {
if (basePath.startsWith('.') && vscode.workspace.workspaceFolders && vscode.workspace.workspaceFolders.length > 0) {
basePath = resolve(vscode.workspace.workspaceFolders[0].uri.fsPath, basePath);
}
basePath = basePath.replace(/[\/\\]$/, "");
return basePath + path;
}
let basePathForCode = vscode.workspace.getConfiguration("LaravelExtraIntellisense").get<string>('basePathForCode');
if (forCode && basePathForCode && basePathForCode.length > 0) {
if (basePathForCode.startsWith('.') && vscode.workspace.workspaceFolders && vscode.workspace.workspaceFolders.length > 0) {
basePathForCode = resolve(vscode.workspace.workspaceFolders[0].uri.fsPath, basePathForCode);
}
basePathForCode = basePathForCode.replace(/[\/\\]$/, "");
return basePathForCode + path;
}
if (vscode.workspace.workspaceFolders instanceof Array && vscode.workspace.workspaceFolders.length > 0) {
for (let workspaceFolder of vscode.workspace.workspaceFolders) {
if (fs.existsSync(workspaceFolder.uri.fsPath + "/artisan")) {
return workspaceFolder.uri.fsPath + path;
}
}
}
return "";
}
static arrayUnique(value:any, index:any, self:Array<any>) {
return self.indexOf(value) === index;
}
/**
* Boot laravel and run simple php code.
*
* @param code
*/
static runLaravel(code: string) : Promise<string> {
code = code.replace(/(?:\r\n|\r|\n)/g, ' ');
if (fs.existsSync(Helpers.projectPath("vendor/autoload.php")) && fs.existsSync(Helpers.projectPath("bootstrap/app.php"))) {
var command =
"define('LARAVEL_START', microtime(true));" +
"require_once '" + Helpers.projectPath("vendor/autoload.php", true) + "';" +
"$app = require_once '" + Helpers.projectPath("bootstrap/app.php", true) + "';" +
"class VscodeLaravelExtraIntellisenseProvider extends \\Illuminate\\Support\\ServiceProvider" +
"{" +
" public function register() {}" +
" public function boot()" +
" {" +
" if (method_exists($this->app['log'], 'setHandlers')) {" +
" $this->app['log']->setHandlers([new \\Monolog\\Handler\\NullHandler()]);" +
" }" +
" }" +
"}" +
"$app->register(new VscodeLaravelExtraIntellisenseProvider($app));" +
"$kernel = $app->make(Illuminate\\Contracts\\Console\\Kernel::class);" +
"$status = $kernel->handle(" +
"$input = new Symfony\\Component\\Console\\Input\\ArgvInput," +
"new Symfony\\Component\\Console\\Output\\ConsoleOutput" +
");" +
"echo '___VSCODE_LARAVEL_EXTRA_INSTELLISENSE_OUTPUT___';" +
code +
"echo '___VSCODE_LARAVEL_EXTRA_INSTELLISENSE_END_OUTPUT___';";
var self = this;
return new Promise(function (resolve, error) {
self.runPhp(command)
.then(function (result: string) {
var out : string | null | RegExpExecArray = result;
out = /___VSCODE_LARAVEL_EXTRA_INSTELLISENSE_OUTPUT___(.*)___VSCODE_LARAVEL_EXTRA_INSTELLISENSE_END_OUTPUT___/g.exec(out);
if (out) {
resolve(out[1]);
} else {
error("PARSE ERROR: " + result);
}
})
.catch(function (e : Error) {
error(e);
});
});
}
return new Promise((resolve, error) => resolve(""));
}
/**
* run simple php code.
*
* @param code
*/
static async runPhp(code: string) : Promise<string> {
code = code.replace(/\"/g, "\\\"");
if (['linux', 'openbsd', 'sunos', 'darwin'].some(unixPlatforms => os.platform().includes(unixPlatforms))) {
code = code.replace(/\$/g, "\\$");
code = code.replace(/\\\\'/g, '\\\\\\\\\'');
code = code.replace(/\\\\"/g, '\\\\\\\\\"');
}
let command = vscode.workspace.getConfiguration("LaravelExtraIntellisense").get<string>('phpCommand') ?? "php -r \"{code}\"";
command = command.replace("{code}", code);
let out = new Promise<string>(function (resolve, error) {
cp.exec(command,
{ cwd: vscode.workspace.workspaceFolders && vscode.workspace.workspaceFolders.length > 0 ? vscode.workspace.workspaceFolders[0].uri.fsPath : undefined },
function (err, stdout, stderr) {
if (stdout.length > 0) {
resolve(stdout);
} else {
if (Helpers.outputChannel !== null) {
Helpers.outputChannel.appendLine("Laravel extra intellisense Error: " + stderr);
}
error(stderr);
}
}
);
});
return out;
}
/**
* Parse php code with 'php-parser' package.
* @param code
*/
static parsePhp(code: string): any {
if (! Helpers.phpParser) {
var PhpEngine = require('php-parser');
Helpers.phpParser = new PhpEngine({
parser: {
extractDoc: true,
php7: true
},
ast: {
withPositions: true
}
});
}
try {
return Helpers.phpParser.parseCode(code);
} catch (exception) {
return null;
}
}
/**
	 * Convert a PHP variable definition to a JavaScript variable.
* @param code
*/
static evalPhp(code: string): any {
var out = Helpers.parsePhp('<?php ' + code + ';');
if (out && typeof out.children[0] !== 'undefined') {
return out.children[0].expression.value;
}
return undefined;
}
/**
* Parse php function call.
*
* @param text
* @param position
*/
static parseFunction(text: string, position: number, level: number = 0): any {
var out:any = null;
var classes = [];
for(let i in Helpers.tags) {
for (let j in Helpers.tags[i].classes) {
classes.push(Helpers.tags[i].classes[j]);
}
}
var regexPattern = "(((" + classes.join('|') + ")::)?([@A-Za-z0-9_]+))((\\()((?:[^)(]|\\((?:[^)(]|\\([^)(]*\\))*\\))*)(\\)|$))";
var functionRegex = new RegExp(regexPattern, "g");
var paramsRegex = /((\s*\,\s*)?)(\[[\s\S]*(\]|$)|array\[\s\S]*(\)|$)|(\"((\\\")|[^\"])*(\"|$))|(\'((\\\')|[^\'])*(\'|$)))/g;
var inlineFunctionMatch = /\((([\s\S]*\,)?\s*function\s*\(.*\)\s*\{)([\S\s]*)\}/g;
text = text.substr(Math.max(0, position - 200), 400);
position -= Math.max(0, position - 200);
var match = null;
var match2 = null;
if (Helpers.cachedParseFunction !== null && Helpers.cachedParseFunction.text === text && position === Helpers.cachedParseFunction.position) {
out = Helpers.cachedParseFunction.out;
} else if (level < 6) {
while ((match = functionRegex.exec(text)) !== null) {
if (position >= match.index && match[0] && position < match.index + match[0].length) {
if ((match2 = inlineFunctionMatch.exec(match[0])) !== null && typeof match2[3] === 'string' && typeof match[1] | projectPath | identifier_name |
helpers.ts | fim_type: identifier_body

functions: []},
};
static functionRegex: any = null;
static relationMethods = ['has', 'orHas', 'whereHas', 'orWhereHas', 'whereDoesntHave', 'orWhereDoesntHave',
'doesntHave', 'orDoesntHave', 'hasMorph', 'orHasMorph', 'doesntHaveMorph', 'orDoesntHaveMorph',
'whereHasMorph', 'orWhereHasMorph', 'whereDoesntHaveMorph', 'orWhereDoesntHaveMorph',
'withAggregate', 'withCount', 'withMax', 'withMin', 'withSum', 'withAvg'];
/**
* Create full path from project file name
*
* @param path
* @param forCode
	 * @returns {string}
*/
static projectPath(path:string, forCode: boolean = false) : string {
if (path[0] !== '/') {
path = '/' + path;
}
let basePath = vscode.workspace.getConfiguration("LaravelExtraIntellisense").get<string>('basePath');
if (forCode === false && basePath && basePath.length > 0) {
if (basePath.startsWith('.') && vscode.workspace.workspaceFolders && vscode.workspace.workspaceFolders.length > 0) {
basePath = resolve(vscode.workspace.workspaceFolders[0].uri.fsPath, basePath);
}
basePath = basePath.replace(/[\/\\]$/, "");
return basePath + path;
}
let basePathForCode = vscode.workspace.getConfiguration("LaravelExtraIntellisense").get<string>('basePathForCode');
if (forCode && basePathForCode && basePathForCode.length > 0) {
if (basePathForCode.startsWith('.') && vscode.workspace.workspaceFolders && vscode.workspace.workspaceFolders.length > 0) {
basePathForCode = resolve(vscode.workspace.workspaceFolders[0].uri.fsPath, basePathForCode);
}
basePathForCode = basePathForCode.replace(/[\/\\]$/, "");
return basePathForCode + path;
}
if (vscode.workspace.workspaceFolders instanceof Array && vscode.workspace.workspaceFolders.length > 0) {
for (let workspaceFolder of vscode.workspace.workspaceFolders) {
if (fs.existsSync(workspaceFolder.uri.fsPath + "/artisan")) {
return workspaceFolder.uri.fsPath + path;
}
}
}
return "";
}
static arrayUnique(value:any, index:any, self:Array<any>) {
return self.indexOf(value) === index;
}
/**
* Boot laravel and run simple php code.
*
* @param code
*/
static runLaravel(code: string) : Promise<string> {
code = code.replace(/(?:\r\n|\r|\n)/g, ' ');
if (fs.existsSync(Helpers.projectPath("vendor/autoload.php")) && fs.existsSync(Helpers.projectPath("bootstrap/app.php"))) {
var command =
"define('LARAVEL_START', microtime(true));" +
"require_once '" + Helpers.projectPath("vendor/autoload.php", true) + "';" +
"$app = require_once '" + Helpers.projectPath("bootstrap/app.php", true) + "';" +
"class VscodeLaravelExtraIntellisenseProvider extends \\Illuminate\\Support\\ServiceProvider" +
"{" +
" public function register() {}" +
" public function boot()" +
" {" +
" if (method_exists($this->app['log'], 'setHandlers')) {" +
" $this->app['log']->setHandlers([new \\Monolog\\Handler\\NullHandler()]);" +
" }" +
" }" +
"}" +
"$app->register(new VscodeLaravelExtraIntellisenseProvider($app));" +
"$kernel = $app->make(Illuminate\\Contracts\\Console\\Kernel::class);" +
"$status = $kernel->handle(" +
"$input = new Symfony\\Component\\Console\\Input\\ArgvInput," +
"new Symfony\\Component\\Console\\Output\\ConsoleOutput" +
");" +
"echo '___VSCODE_LARAVEL_EXTRA_INSTELLISENSE_OUTPUT___';" +
code +
"echo '___VSCODE_LARAVEL_EXTRA_INSTELLISENSE_END_OUTPUT___';";
var self = this;
return new Promise(function (resolve, error) {
self.runPhp(command)
.then(function (result: string) {
var out : string | null | RegExpExecArray = result;
out = /___VSCODE_LARAVEL_EXTRA_INSTELLISENSE_OUTPUT___(.*)___VSCODE_LARAVEL_EXTRA_INSTELLISENSE_END_OUTPUT___/g.exec(out);
if (out) {
resolve(out[1]);
} else {
error("PARSE ERROR: " + result);
}
})
.catch(function (e : Error) {
error(e);
});
});
}
return new Promise((resolve, error) => resolve(""));
}
/**
* run simple php code.
*
* @param code
*/
	static async runPhp(code: string) : Promise<string> {
		code = code.replace(/\"/g, "\\\"");
		if (['linux', 'openbsd', 'sunos', 'darwin'].some(unixPlatforms => os.platform().includes(unixPlatforms))) {
			code = code.replace(/\$/g, "\\$");
			code = code.replace(/\\\\'/g, '\\\\\\\\\'');
			code = code.replace(/\\\\"/g, '\\\\\\\\\"');
		}
		let command = vscode.workspace.getConfiguration("LaravelExtraIntellisense").get<string>('phpCommand') ?? "php -r \"{code}\"";
		command = command.replace("{code}", code);
		let out = new Promise<string>(function (resolve, error) {
			cp.exec(command,
				{ cwd: vscode.workspace.workspaceFolders && vscode.workspace.workspaceFolders.length > 0 ? vscode.workspace.workspaceFolders[0].uri.fsPath : undefined },
				function (err, stdout, stderr) {
					if (stdout.length > 0) {
						resolve(stdout);
					} else {
						if (Helpers.outputChannel !== null) {
							Helpers.outputChannel.appendLine("Laravel extra intellisense Error: " + stderr);
						}
						error(stderr);
					}
}
);
});
return out;
}
/**
* Parse php code with 'php-parser' package.
* @param code
*/
static parsePhp(code: string): any {
if (! Helpers.phpParser) {
var PhpEngine = require('php-parser');
Helpers.phpParser = new PhpEngine({
parser: {
extractDoc: true,
php7: true
},
ast: {
withPositions: true
}
});
}
try {
return Helpers.phpParser.parseCode(code);
} catch (exception) {
return null;
}
}
/**
	 * Convert a PHP variable definition to a JavaScript variable.
* @param code
*/
static evalPhp(code: string): any {
var out = Helpers.parsePhp('<?php ' + code + ';');
if (out && typeof out.children[0] !== 'undefined') {
return out.children[0].expression.value;
}
return undefined;
}
/**
* Parse php function call.
*
* @param text
* @param position
*/
static parseFunction(text: string, position: number, level: number = 0): any {
var out:any = null;
var classes = [];
for(let i in Helpers.tags) {
for (let j in Helpers.tags[i].classes) {
classes.push(Helpers.tags[i].classes[j]);
}
}
var regexPattern = "(((" + classes.join('|') + ")::)?([@A-Za-z0-9_]+))((\\()((?:[^)(]|\\((?:[^)(]|\\([^)(]*\\))*\\))*)(\\)|$))";
var functionRegex = new RegExp(regexPattern, "g");
var paramsRegex = /((\s*\,\s*)?)(\[[\s\S]*(\]|$)|array\[\s\S]*(\)|$)|(\"((\\\")|[^\"])*(\"|$))|(\'((\\\')|[^\'])*(\'|$)))/g;
var inlineFunctionMatch = /\((([\s\S]*\,)?\s*function\s*\(.*\)\s*\{)([\S\s]*)\}/g;
text = text.substr(Math.max(0, position - 200), 400);
position -= Math.max(0, position - 200);
var match = null;
var match2 = null;
if (Helpers.cachedParseFunction !== null && Helpers.cachedParseFunction.text === text && position === Helpers.cachedParseFunction.position) {
out = Helpers.cachedParseFunction.out;
} else if (level < 6) {
while ((match = functionRegex.exec(text)) !== null) {
if (position >= match.index && match[0] && position < match.index + match[0].length) {
					if ((match2 = inlineFunctionMatch.exec(match[0])) !== null && typeof match2[3] === 'string' && typeof match[1]

httpProcess.go
	broker.clientIpAdress = append(broker.clientIpAdress, ipAddress)
	index = len(broker.clientIpAdress) - 1
	return
}
func (broker *Broker) findIpPosition(ipAddress string) (pos int) { // finds the position of the given IP in the slice.
	for i, ip := range broker.clientIpAdress {
		if ip == ipAddress {
			pos = i
			return
		}
	}
	return
}
func (broker *Broker) closeClientToken(pos int) { // removes the user's token from the slice when the user leaves.
	broker.clientTokens = append(broker.clientTokens[:pos], broker.clientTokens[pos+1:]...)
}
func (broker *Broker) closeClientIpAddres(pos int) { // removes the user's IP address from the slice when the user leaves.
	broker.clientIpAdress = append(broker.clientIpAdress[:pos], broker.clientIpAdress[pos+1:]...)
}
func (broker *Broker) ServeHTTP(rw http.ResponseWriter, req *http.Request) { // runs when a client connects to our server.
	flusher, ok := rw.(http.Flusher)
	if !ok {
		http.Error(rw, "Streaming unsupported!", http.StatusInternalServerError)
		return
	}
	rw.Header().Set("Content-Type", "text/event-stream")
	rw.Header().Set("Cache-Control", "no-cache") // headers required for the SSE handshake
	rw.Header().Set("Connection", "keep-alive")
	rw.Header().Set("Access-Control-Allow-Origin", "*")
	messageChan := make(chan []byte)
	broker.newClients <- messageChan // channel that kicks in when a new client connects
	newClientFullIp := req.RemoteAddr // take the client's IP address.
	index := broker.addNewClient(newClientFullIp) // take the client's index in the slice.
	token := broker.clientTokens[index] // map the IP index to the same index in the token slice.
	client.SAdd("onlineUserTokens", token) // add this user's token to our unique set in Redis.
	defer func() { // when the user disconnects, this channel is sent as a notification.
		broker.closingClients <- messageChan
	}()
	notify := rw.(http.CloseNotifier).CloseNotify() // notify fires when a client connected on port 3000 exits.
	clientCloseFullIp := req.RemoteAddr // take the disconnecting user's IP address
	go func() { // when the user disconnects, concurrently send this over the channel as a notification.
		<-notify
		posClientIp := broker.findIpPosition(clientCloseFullIp) // find the disconnecting user's index in the slice
		client.SRem("onlineUserTokens", broker.clientTokens[posClientIp]) // look up the token at the index obtained above and remove it from the unique set in Redis.
		broker.closeClientToken(posClientIp) // remove the user from the token slice
		broker.closeClientIpAddres(posClientIp) // remove the user from the IP slice.
		broker.closingClients <- messageChan // send the close notification.
	}()
	for { // here we broadcast each message, in turn, to the users of the matching project.
		tokenPosition := broker.findIpPosition(newClientFullIp) // the active user's token index
		token := broker.clientTokens[tokenPosition] // the active user's token.
		data := ByteToStr(<-messageChan) // the message coming in over the channel
		parsedData := strings.Split(data, ":") // parse the data coming from Redis (it arrives as channel and message.)
		channels := strings.Split(parsedData[0], "_") // extract the channel info.
		isFindUserToken := client.SIsMember(channels[0][:4], token).Val() // check the Redis set so that each message only reaches users of the matching project.
		if isFindUserToken {
			fmt.Fprintf(rw, "data: %s\n\n", channels[1]+"_"+parsedData[1])
			flusher.Flush()
		}
	}
}
func (broker *Broker) listen() { // here a select statement manages our concurrently running methods from a single point.
	for {
		select {
		case s := <-broker.newClients: // a new client has connected..
			broker.clients[s] = true
			onlieUsers := client.SMembers("onlineUserTokens")
			fmt.Println("online Users:", onlieUsers)
			onlineUsersCount := client.SCard("onlineUserTokens")
			fmt.Println("online users count:", onlineUsersCount)
		case s := <-broker.closingClients: // a client has left and we want to stop sending it messages
			delete(broker.clients, s) // drop its channel from the map (also keeps `s` used; the original branch left it unused)
			onlieUsers := client.SMembers("onlineUserTokens")
			fmt.Println("online Users:", onlieUsers)
			onlineUsersCount := client.SCard("onlineUserTokens")
			fmt.Println("online users count:", onlineUsersCount)
delete(broker.clients, s)
case event := <-broker.Notifier://sisteme bağlı tüm clientlere notify gönderiyoruz
for clientMessageChan, _ := range broker.clients {
clientMessageChan <- event
}
}
}
}
func check(err error, message string) {//genel hata yönetimi mekanizması
if err != nil {
panic(err)
}
fmt.Printf("%s\n", message)
}
func JsonStatus(message string, status int, jw http.ResponseWriter) {//Genel Response metodumuz
jw.Header().Set("Content-Type", "application/json")
return_message := &StatusMessage{Message: message, StatusCode: status}
jw.WriteHeader(http.StatusCreated)
json.NewEncoder(jw).Encode(return_message)
}
func ByteToStr(data []byte) string{//byte to str
d :=string(data[8:])
d = d[:len(d)-1]
return d
}
func randToken() string {//random token üreten fonksiyon
b := make([]byte, 8)
rand.Read(b)
return fmt.Sprintf("%x", b)
}
func handShake(w http.ResponseWriter, r *http.Request) {//API üzerinden gelen subscribe
var u User
_ = json.NewDecoder(r.Body).Decode(&u)
if u.Password == "" || u.UserName == "" || u.SubKey == "" || u.PubKey == "" {
JsonStatus("Error! Required User name ,Password ,Subkey and Pubkey!" ,330, w)
return
}
isCheck , token := checkUser(u)
if isCheck == -1{
JsonStatus("Error! Invalid User Info!" ,330, w)
return
}else{
JsonStatus("Token:"+token ,200, w)
return
}
}
func checkUser(user User) (isCheck int ,token string) {//handchake yaparken gelen datamızı mysql ile kontrol ediyoruz
isCheck = -1
token =""
c := user
var userId,appId int
stmtOut, err := db.Prepare("SELECT user_id FROM account where user_name = ? AND password = ?")
check(err,"")
err = stmtOut.QueryRow(c.UserName,c.Password).Scan(&userId)
if userId > 0{
stmtOut, err := db.Prepare("SELECT app_id FROM app where user_id = ? AND pub_key = ? AND sub_key = ?")
check(err,"")
err = stmtOut.QueryRow(userId,c.PubKey,c.SubKey).Scan(&appId)
if appId > 0 {
isCheck = appId
token = randToken()
client.SAdd(c.SubKey[:8],token)
}
}
defer stmtOut.Close()
return
}
func checkSubKey(subkey string) (isCheck int) {//handchake yaparken gelen datamızı mysql ile kontrol ediyoruz
var appId int
stmtOut, err := db.Prepare("SELECT app_id FROM app where sub_key = ?")
| err := db.Prepare("SELECT app_id FROM app where pub_key = ? AND sub_key = ?")
check(err,"")
err = stmtOut.QueryRow(pubkey,subkey).Scan(&appId)
if appId > 0{
isCheck =appId
}else{
isCheck = -1
}
defer stmtOut.Close()
return
}
func (broker *Broker)subscribeHttpToRedis(w http.ResponseWriter, r *http.Request) {//API üzerinden gelen subscribe
var s Subscribe
_ = json.NewDecoder(r.Body).Decode(&s)
if s.Token == "" || s.Subkey == "" {
JsonStatus("Error! Required Token and Subkey Subkey!" ,330, w)
return
}
stateSubkey := client.SIsMember("userSubkeys",s.Subkey[: | check(err,"")
err = stmtOut.QueryRow(subkey).Scan(&appId)
if appId > 0{
isCheck =appId
}else{
isCheck = -1
}
defer stmtOut.Close()
return
}
func checkPupKeySubKey(pubkey string,subkey string) (isCheck int) {//handchake yaparken gelen datamızı mysql ile kontrol ediyoruz
var appId int
stmtOut, | identifier_body |
httpProcess.go | .
broker.clientIpAdress =append(broker.clientIpAdress,ipAddress)
index = len(broker.clientIpAdress)-1
return
}
func (broker *Broker) findIpPosition(ipAddress string) (pos int) {//verilen ip'nin dizideki pozisyonunu buluyor.
for i,ip := range broker.clientIpAdress{
if ip == ipAddress{
pos = i
return
}
}
return
}
func (broker *Broker) closeClientToken(pos int){//kullanıcı çıktığında diziden çıkıyor.
broker.clientTokens = append(broker.clientTokens[:pos], broker.clientTokens[pos+1:]...)
}
func (broker *Broker) closeClientIpAddres(pos int){//kullanıcı çıktığında
broker.clientIpAdress = append(broker.clientIpAdress[:pos], broker.clientIpAdress[pos+1:]...)
}
func (broker *Broker) ServeHTTP(rw http.ResponseWriter, req *http.Request) {//client serverimiza bağlanıca..
flusher, ok := rw.(http.Flusher)
if !ok {
http.Error(rw, "Streaming unsupported!", http.StatusInternalServerError)
return
}
rw.Header().Set("Content-Type", "text/event-stream")
rw.Header().Set("Cache-Control", "no-cache")//handshake için gerekli headerler
rw.Header().Set("Connection", "keep-alive")
rw.Header().Set("Access-Control-Allow-Origin", "*")
messageChan := make(chan []byte)
broker.newClients <- messageChan//yeni client bağlanında devreye girecek olan channel
newClientFullIp := req.RemoteAddr//client'in ip adresini alıyoruz.
index := broker.addNewClient(newClientFullIp)//client'in dizideki indisini alıyoruz.
token := broker.clientTokens[index]//ip indisini token dizisinde aynı indise eşitliyoruz.
client.SAdd("onlineUserTokens",token)//rediste uniq listemize bu kullanıcının tokenını ekliyoruz.
defer func() {//kullanıcı disconnect olduğunda bu channel notification olarak gidecek.
broker.closingClients <- messageChan
}()
notify := rw.(http.CloseNotifier).CloseNotify()//3000 portuna bağlı client çıkış yapınca notify devreye giriyor..
clientCloseFullIp := req.RemoteAddr// disconnect olan kullanıcının ip adresinin alıyoruz
go func() {//user disconnect olunca eş zamanlı bunu channel ile notification olarak gönderecez.
<-notify
posClientIp := broker.findIpPosition(clientCloseFullIp)//disconnect olan kullanıcının dizideki indisini buluyoruz
client.SRem("onlineUserTokens",broker.clientTokens[posClientIp])//yukarıda elde ettiğimiz indisteki tokeni bulıuyoruz ve bunu redisteki uniq listten çıkarıyoruz.
broker.closeClientToken(posClientIp)//user'i token dizisinden çıkarıyoruz
broker.closeClientIpAddres(posClientIp)//user'i ip dizisinden çıkarıyoruz.
broker.closingClients <- messageChan//close notification'u gönderiyoruz.
}()
for {//burada ilgili tüm userlere sırasıyla ilgili projelerine broadcast mesaj göndericez.
tokenPosition := broker.findIpPosition(newClientFullIp)//aktif userin token indisi
token := broker.clientTokens[tokenPosition]//aktif user'in tokeni.
data := ByteToStr(<-messageChan)//channel'den gelen mesajımız
parsedData := strings.Split(data,":")//redisten gelen datayı parse ediyoruz(kanal ve mesaj olarak geliyor.)
channels := strings.Split(parsedData[0],"_")//channel bilgisini elde ettik.
isFindUserToken := client.SIsMember(channels[0][:4],token).Val()//userlerin ilgili projelerine ilgili mesajı ayırt etmek için rediste listede kontrol yapıyoruz.
if isFindUserToken{
fmt.Fprintf(rw, "data: %s\n\n", channels[1]+"_"+parsedData[1])
flusher.Flush()
}
}
}
func (broker *Broker) listen() {//burada eş zamanlı olarak çalışan ilgili metotlarımızı tek bir noktadan yönetmek için select ifadesini kullanıyoruz.
for {
select {
case s := <-broker.newClients: //yeni bir client bağlandı..
broker.clients[s] = true
onlieUsers := client.SMembers( | }
}
func check(err error, message string) {//genel hata yönetimi mekanizması
if err != nil {
panic(err)
}
fmt.Printf("%s\n", message)
}
func J
sonStatus(message string, status int, jw http.ResponseWriter) {//Genel Response metodumuz
jw.Header().Set("Content-Type", "application/json")
return_message := &StatusMessage{Message: message, StatusCode: status}
jw.WriteHeader(http.StatusCreated)
json.NewEncoder(jw).Encode(return_message)
}
func ByteToStr(data []byte) string{//byte to str
d :=string(data[8:])
d = d[:len(d)-1]
return d
}
func randToken() string {//random token üreten fonksiyon
b := make([]byte, 8)
rand.Read(b)
return fmt.Sprintf("%x", b)
}
func handShake(w http.ResponseWriter, r *http.Request) {//API üzerinden gelen subscribe
var u User
_ = json.NewDecoder(r.Body).Decode(&u)
if u.Password == "" || u.UserName == "" || u.SubKey == "" || u.PubKey == "" {
JsonStatus("Error! Required User name ,Password ,Subkey and Pubkey!" ,330, w)
return
}
isCheck , token := checkUser(u)
if isCheck == -1{
JsonStatus("Error! Invalid User Info!" ,330, w)
return
}else{
JsonStatus("Token:"+token ,200, w)
return
}
}
func checkUser(user User) (isCheck int ,token string) {//handchake yaparken gelen datamızı mysql ile kontrol ediyoruz
isCheck = -1
token =""
c := user
var userId,appId int
stmtOut, err := db.Prepare("SELECT user_id FROM account where user_name = ? AND password = ?")
check(err,"")
err = stmtOut.QueryRow(c.UserName,c.Password).Scan(&userId)
if userId > 0{
stmtOut, err := db.Prepare("SELECT app_id FROM app where user_id = ? AND pub_key = ? AND sub_key = ?")
check(err,"")
err = stmtOut.QueryRow(userId,c.PubKey,c.SubKey).Scan(&appId)
if appId > 0 {
isCheck = appId
token = randToken()
client.SAdd(c.SubKey[:8],token)
}
}
defer stmtOut.Close()
return
}
func checkSubKey(subkey string) (isCheck int) {//handchake yaparken gelen datamızı mysql ile kontrol ediyoruz
var appId int
stmtOut, err := db.Prepare("SELECT app_id FROM app where sub_key = ?")
check(err,"")
err = stmtOut.QueryRow(subkey).Scan(&appId)
if appId > 0{
isCheck =appId
}else{
isCheck = -1
}
defer stmtOut.Close()
return
}
func checkPupKeySubKey(pubkey string,subkey string) (isCheck int) {//handchake yaparken gelen datamızı mysql ile kontrol ediyoruz
var appId int
stmtOut, err := db.Prepare("SELECT app_id FROM app where pub_key = ? AND sub_key = ?")
check(err,"")
err = stmtOut.QueryRow(pubkey,subkey).Scan(&appId)
if appId > 0{
isCheck =appId
}else{
isCheck = -1
}
defer stmtOut.Close()
return
}
func (broker *Broker)subscribeHttpToRedis(w http.ResponseWriter, r *http.Request) {//API üzerinden gelen subscribe
var s Subscribe
_ = json.NewDecoder(r.Body).Decode(&s)
if s.Token == "" || s.Subkey == "" {
JsonStatus("Error! Required Token and Subkey Subkey!" ,330, w)
return
}
stateSubkey := client.SIsMember("userSubkeys",s.Subkey | "onlineUserTokens")
fmt.Println("online Users:",onlieUsers)
onlineUsersCount := client.SCard("onlineUserTokens")
fmt.Println("online users count:",onlineUsersCount)
case s := <-broker.closingClients://Bir client ayrıldı ve mesaj göndermeyi bırakmak istiyoruz
onlieUsers := client.SMembers("onlineUserTokens")
fmt.Println("online Users:",onlieUsers)
onlineUsersCount := client.SCard("onlineUserTokens")
fmt.Println("online users count:",onlineUsersCount)
delete(broker.clients, s)
case event := <-broker.Notifier://sisteme bağlı tüm clientlere notify gönderiyoruz
for clientMessageChan, _ := range broker.clients {
clientMessageChan <- event
}
} | conditional_block |
httpProcess.go | .
broker.clientIpAdress =append(broker.clientIpAdress,ipAddress)
index = len(broker.clientIpAdress)-1
return
}
func (broker *Broker) findIpPosition(ipAddress string) (pos int) {//verilen ip'nin dizideki pozisyonunu buluyor.
for i,ip := range broker.clientIpAdress{
if ip == ipAddress{
pos = i
return
}
}
return
}
func (broker *Broker) closeClientToken(pos int){//kullanıcı çıktığında diziden çıkıyor.
broker.clientTokens = append(broker.clientTokens[:pos], broker.clientTokens[pos+1:]...)
}
func (broker *Broker) closeClientIpAddres(pos int){//kullanıcı çıktığında
broker.clientIpAdress = append(broker.clientIpAdress[:pos], broker.clientIpAdress[pos+1:]...)
}
func (broker *Broker) ServeHTTP(rw http.ResponseWriter, req *http.Request) {//client serverimiza bağlanıca..
flusher, ok := rw.(http.Flusher)
if !ok {
http.Error(rw, "Streaming unsupported!", http.StatusInternalServerError)
return
}
rw.Header().Set("Content-Type", "text/event-stream")
rw.Header().Set("Cache-Control", "no-cache")//handshake için gerekli headerler
rw.Header().Set("Connection", "keep-alive")
rw.Header().Set("Access-Control-Allow-Origin", "*")
messageChan := make(chan []byte)
broker.newClients <- messageChan//yeni client bağlanında devreye girecek olan channel
newClientFullIp := req.RemoteAddr//client'in ip adresini alıyoruz.
index := broker.addNewClient(newClientFullIp)//client'in dizideki indisini alıyoruz.
token := broker.clientTokens[index]//ip indisini token dizisinde aynı indise eşitliyoruz.
client.SAdd("onlineUserTokens",token)//rediste uniq listemize bu kullanıcının tokenını ekliyoruz.
defer func() {//kullanıcı disconnect olduğunda bu channel notification olarak gidecek.
broker.closingClients <- messageChan
}()
notify := rw.(http.CloseNotifier).CloseNotify()//3000 portuna bağlı client çıkış yapınca notify devreye giriyor..
clientCloseFullIp := req.RemoteAddr// disconnect olan kullanıcının ip adresinin alıyoruz
go func() {//user disconnect olunca eş zamanlı bunu channel ile notification olarak gönderecez.
<-notify
posClientIp := broker.findIpPosition(clientCloseFullIp)//disconnect olan kullanıcının dizideki indisini buluyoruz
client.SRem("onlineUserTokens",broker.clientTokens[posClientIp])//yukarıda elde ettiğimiz indisteki tokeni bulıuyoruz ve bunu redisteki uniq listten çıkarıyoruz.
broker.closeClientToken(posClientIp)//user'i token dizisinden çıkarıyoruz
broker.closeClientIpAddres(posClientIp)//user'i ip dizisinden çıkarıyoruz.
broker.closingClients <- messageChan//close notification'u gönderiyoruz.
}()
for {//burada ilgili tüm userlere sırasıyla ilgili projelerine broadcast mesaj göndericez.
tokenPosition := broker.findIpPosition(newClientFullIp)//aktif userin token indisi
token := broker.clientTokens[tokenPosition]//aktif user'in tokeni.
data := ByteToStr(<-messageChan)//channel'den gelen mesajımız
parsedData := strings.Split(data,":")//redisten gelen datayı parse ediyoruz(kanal ve mesaj olarak geliyor.)
channels := strings.Split(parsedData[0],"_")//channel bilgisini elde ettik.
isFindUserToken := client.SIsMember(channels[0][:4],token).Val()//userlerin ilgili projelerine ilgili mesajı ayırt etmek için rediste listede kontrol yapıyoruz.
if isFindUserToken{
fmt.Fprintf(rw, "data: %s\n\n", channels[1]+"_"+parsedData[1])
flusher.Flush()
}
}
}
func (broker *Broker) listen() {//burada eş zamanlı olarak çalışan ilgili metotlarımızı tek bir noktadan yönetmek için select ifadesini kullanıyoruz.
for {
select {
case s := <-broker.newClients: //yeni bir client bağlandı..
broker.clients[s] = true
onlieUsers := client.SMembers("onlineUserTokens")
fmt.Println("online Users:",onlieUsers)
onlineUsersCount := client.SCard("onlineUserTokens")
fmt.Println("online users count:",onlineUsersCount)
case s := <-broker.closingClients://Bir client ayrıldı ve mesaj göndermeyi bırakmak istiyoruz
onlieUsers := client.SMembers("onlineUserTokens")
fmt.Println("online Users:",onlieUsers)
onlineUsersCount := client.SCard("onlineUserTokens")
fmt.Println("online users count:",onlineUsersCount)
delete(broker.clients, s)
case event := <-broker.Notifier://sisteme bağlı tüm clientlere notify gönderiyoruz
for clientMessageChan, _ := range broker.clients {
clientMessageChan <- event
}
}
}
}
func check(err error, message string) {//genel hata yönetimi mekanizması
if err != nil {
panic(err)
}
fmt.Printf("%s\n", message)
}
func JsonStatus(message string, status int, jw http.ResponseWriter) {//Genel Response metodumuz
jw.Header().Set("Content-Type", "application/json")
retu | := &StatusMessage{Message: message, StatusCode: status}
jw.WriteHeader(http.StatusCreated)
json.NewEncoder(jw).Encode(return_message)
}
func ByteToStr(data []byte) string{//byte to str
d :=string(data[8:])
d = d[:len(d)-1]
return d
}
func randToken() string {//random token üreten fonksiyon
b := make([]byte, 8)
rand.Read(b)
return fmt.Sprintf("%x", b)
}
func handShake(w http.ResponseWriter, r *http.Request) {//API üzerinden gelen subscribe
var u User
_ = json.NewDecoder(r.Body).Decode(&u)
if u.Password == "" || u.UserName == "" || u.SubKey == "" || u.PubKey == "" {
JsonStatus("Error! Required User name ,Password ,Subkey and Pubkey!" ,330, w)
return
}
isCheck , token := checkUser(u)
if isCheck == -1{
JsonStatus("Error! Invalid User Info!" ,330, w)
return
}else{
JsonStatus("Token:"+token ,200, w)
return
}
}
func checkUser(user User) (isCheck int ,token string) {//handchake yaparken gelen datamızı mysql ile kontrol ediyoruz
isCheck = -1
token =""
c := user
var userId,appId int
stmtOut, err := db.Prepare("SELECT user_id FROM account where user_name = ? AND password = ?")
check(err,"")
err = stmtOut.QueryRow(c.UserName,c.Password).Scan(&userId)
if userId > 0{
stmtOut, err := db.Prepare("SELECT app_id FROM app where user_id = ? AND pub_key = ? AND sub_key = ?")
check(err,"")
err = stmtOut.QueryRow(userId,c.PubKey,c.SubKey).Scan(&appId)
if appId > 0 {
isCheck = appId
token = randToken()
client.SAdd(c.SubKey[:8],token)
}
}
defer stmtOut.Close()
return
}
func checkSubKey(subkey string) (isCheck int) {//handchake yaparken gelen datamızı mysql ile kontrol ediyoruz
var appId int
stmtOut, err := db.Prepare("SELECT app_id FROM app where sub_key = ?")
check(err,"")
err = stmtOut.QueryRow(subkey).Scan(&appId)
if appId > 0{
isCheck =appId
}else{
isCheck = -1
}
defer stmtOut.Close()
return
}
func checkPupKeySubKey(pubkey string,subkey string) (isCheck int) {//handchake yaparken gelen datamızı mysql ile kontrol ediyoruz
var appId int
stmtOut, err := db.Prepare("SELECT app_id FROM app where pub_key = ? AND sub_key = ?")
check(err,"")
err = stmtOut.QueryRow(pubkey,subkey).Scan(&appId)
if appId > 0{
isCheck =appId
}else{
isCheck = -1
}
defer stmtOut.Close()
return
}
func (broker *Broker)subscribeHttpToRedis(w http.ResponseWriter, r *http.Request) {//API üzerinden gelen subscribe
var s Subscribe
_ = json.NewDecoder(r.Body).Decode(&s)
if s.Token == "" || s.Subkey == "" {
JsonStatus("Error! Required Token and Subkey Subkey!" ,330, w)
return
}
stateSubkey := client.SIsMember("userSubkeys",s.Subkey[: | rn_message | identifier_name |
httpProcess.go | iyor.
broker.clientIpAdress =append(broker.clientIpAdress,ipAddress)
index = len(broker.clientIpAdress)-1
return
}
func (broker *Broker) findIpPosition(ipAddress string) (pos int) {//verilen ip'nin dizideki pozisyonunu buluyor.
for i,ip := range broker.clientIpAdress{
if ip == ipAddress{
pos = i
return
}
}
return
}
func (broker *Broker) closeClientToken(pos int){//kullanıcı çıktığında diziden çıkıyor.
broker.clientTokens = append(broker.clientTokens[:pos], broker.clientTokens[pos+1:]...)
}
func (broker *Broker) closeClientIpAddres(pos int){//kullanıcı çıktığında
broker.clientIpAdress = append(broker.clientIpAdress[:pos], broker.clientIpAdress[pos+1:]...)
}
func (broker *Broker) ServeHTTP(rw http.ResponseWriter, req *http.Request) {//client serverimiza bağlanıca..
flusher, ok := rw.(http.Flusher)
if !ok {
http.Error(rw, "Streaming unsupported!", http.StatusInternalServerError)
return
}
rw.Header().Set("Content-Type", "text/event-stream")
rw.Header().Set("Cache-Control", "no-cache")//handshake için gerekli headerler
rw.Header().Set("Connection", "keep-alive")
rw.Header().Set("Access-Control-Allow-Origin", "*")
messageChan := make(chan []byte)
broker.newClients <- messageChan//yeni client bağlanında devreye girecek olan channel
newClientFullIp := req.RemoteAddr//client'in ip adresini alıyoruz.
index := broker.addNewClient(newClientFullIp)//client'in dizideki indisini alıyoruz.
token := broker.clientTokens[index]//ip indisini token dizisinde aynı indise eşitliyoruz.
client.SAdd("onlineUserTokens",token)//rediste uniq listemize bu kullanıcının tokenını ekliyoruz.
defer func() {//kullanıcı disconnect olduğunda bu channel notification olarak gidecek.
broker.closingClients <- messageChan
}()
notify := rw.(http.CloseNotifier).CloseNotify()//3000 portuna bağlı client çıkış yapınca notify devreye giriyor..
clientCloseFullIp := req.RemoteAddr// disconnect olan kullanıcının ip adresinin alıyoruz
go func() {//user disconnect olunca eş zamanlı bunu channel ile notification olarak gönderecez.
<-notify
posClientIp := broker.findIpPosition(clientCloseFullIp)//disconnect olan kullanıcının dizideki indisini buluyoruz
client.SRem("onlineUserTokens",broker.clientTokens[posClientIp])//yukarıda elde ettiğimiz indisteki tokeni bulıuyoruz ve bunu redisteki uniq listten çıkarıyoruz.
broker.closeClientToken(posClientIp)//user'i token dizisinden çıkarıyoruz
broker.closeClientIpAddres(posClientIp)//user'i ip dizisinden çıkarıyoruz.
broker.closingClients <- messageChan//close notification'u gönderiyoruz.
}()
for {//burada ilgili tüm userlere sırasıyla ilgili projelerine broadcast mesaj göndericez.
tokenPosition := broker.findIpPosition(newClientFullIp)//aktif userin token indisi
token := broker.clientTokens[tokenPosition]//aktif user'in tokeni.
data := ByteToStr(<-messageChan)//channel'den gelen mesajımız
parsedData := strings.Split(data,":")//redisten gelen datayı parse ediyoruz(kanal ve mesaj olarak geliyor.)
channels := strings.Split(parsedData[0],"_")//channel bilgisini elde ettik.
isFindUserToken := client.SIsMember(channels[0][:4],token).Val()//userlerin ilgili projelerine ilgili mesajı ayırt etmek için rediste listede kontrol yapıyoruz.
if isFindUserToken{
fmt.Fprintf(rw, "data: %s\n\n", channels[1]+"_"+parsedData[1])
flusher.Flush()
}
}
}
func (broker *Broker) listen() {//burada eş zamanlı olarak çalışan ilgili metotlarımızı tek bir noktadan yönetmek için select ifadesini kullanıyoruz.
for {
select {
case s := <-broker.newClients: //yeni bir client bağlandı..
broker.clients[s] = true
onlieUsers := client.SMembers("onlineUserTokens")
fmt.Println("online Users:",onlieUsers)
onlineUsersCount := client.SCard("onlineUserTokens")
fmt.Println("online users count:",onlineUsersCount)
case s := <-broker.closingClients://Bir client ayrıldı ve mesaj göndermeyi bırakmak istiyoruz
onlieUsers := client.SMembers("onlineUserTokens")
fmt.Println("online Users:",onlieUsers)
onlineUsersCount := client.SCard("onlineUserTokens")
fmt.Println("online users count:",onlineUsersCount)
delete(broker.clients, s)
case event := <-broker.Notifier://sisteme bağlı tüm clientlere notify gönderiyoruz
for clientMessageChan, _ := range broker.clients {
clientMessageChan <- event
}
}
}
}
func check(err error, message string) {//genel hata yönetimi mekanizması
if err != nil {
panic(err)
}
fmt.Printf("%s\n", message)
}
func JsonStatus(message string, status int, jw http.ResponseWriter) {//Genel Response metodumuz
jw.Header().Set("Content-Type", "application/json")
return_message := &StatusMessage{Message: message, StatusCode: status}
jw.WriteHeader(http.StatusCreated)
json.NewEncoder(jw).Encode(return_message)
}
func ByteToStr(data []byte) string{//byte to str
d :=string(data[8:])
d = d[:len(d)-1]
return d
}
func randToken() string {//random token üreten fonksiyon
b := make([]byte, 8)
rand.Read(b)
return fmt.Sprintf("%x", b)
}
func handShake(w http.ResponseWriter, r *http.Request) {//API üzerinden gelen subscribe
var u User
_ = json.NewDecoder(r.Body).Decode(&u)
if u.Password == "" || u.UserName == "" || u.SubKey == "" || u.PubKey == "" {
JsonStatus("Error! Required User name ,Password ,Subkey and Pubkey!" ,330, w)
return
}
isCheck , token := checkUser(u)
if isCheck == -1{
JsonStatus("Error! Invalid User Info!" ,330, w)
return
}else{
JsonStatus("Token:"+token ,200, w)
return
}
}
func checkUser(user User) (isCheck int ,token string) {//handchake yaparken gelen datamızı mysql ile kontrol ediyoruz
isCheck = -1
token =""
c := user
var userId,appId int
stmtOut, err := db.Prepare("SELECT user_id FROM account where user_name = ? AND password = ?")
check(err,"")
err = stmtOut.QueryRow(c.UserName,c.Password).Scan(&userId)
if userId > 0{ | check(err,"")
err = stmtOut.QueryRow(userId,c.PubKey,c.SubKey).Scan(&appId)
if appId > 0 {
isCheck = appId
token = randToken()
client.SAdd(c.SubKey[:8],token)
}
}
defer stmtOut.Close()
return
}
func checkSubKey(subkey string) (isCheck int) {//handchake yaparken gelen datamızı mysql ile kontrol ediyoruz
var appId int
stmtOut, err := db.Prepare("SELECT app_id FROM app where sub_key = ?")
check(err,"")
err = stmtOut.QueryRow(subkey).Scan(&appId)
if appId > 0{
isCheck =appId
}else{
isCheck = -1
}
defer stmtOut.Close()
return
}
func checkPupKeySubKey(pubkey string,subkey string) (isCheck int) {//handchake yaparken gelen datamızı mysql ile kontrol ediyoruz
var appId int
stmtOut, err := db.Prepare("SELECT app_id FROM app where pub_key = ? AND sub_key = ?")
check(err,"")
err = stmtOut.QueryRow(pubkey,subkey).Scan(&appId)
if appId > 0{
isCheck =appId
}else{
isCheck = -1
}
defer stmtOut.Close()
return
}
func (broker *Broker)subscribeHttpToRedis(w http.ResponseWriter, r *http.Request) {//API üzerinden gelen subscribe
var s Subscribe
_ = json.NewDecoder(r.Body).Decode(&s)
if s.Token == "" || s.Subkey == "" {
JsonStatus("Error! Required Token and Subkey Subkey!" ,330, w)
return
}
stateSubkey := client.SIsMember("userSubkeys",s.Subkey[:5 | stmtOut, err := db.Prepare("SELECT app_id FROM app where user_id = ? AND pub_key = ? AND sub_key = ?") | random_line_split |
blocks.go | "blocks"
case "lz4", "gzip", "blocks", "uncompressed":
break
default:
err = fmt.Errorf(`compression must be "lz4" (default), "gzip", "blocks" or "uncompressed"`)
return
}
w.Header().Set("Content-type", "application/octet-stream")
// extract querey string
if blockstring == "" {
return
}
coordarray := strings.Split(blockstring, ",")
if len(coordarray)%3 != 0 {
return 0, fmt.Errorf("block query string should be three coordinates per block")
}
var store storage.KeyValueDB
if store, err = datastore.GetKeyValueDB(d); err != nil {
return
}
// launch goroutine that will stream blocks to client
numBlocks = len(coordarray) / 3
wg := new(sync.WaitGroup)
ch := make(chan blockSend, numBlocks)
var sendErr error
var startBlock dvid.ChunkPoint3d
var timing blockTiming
go func() {
for data := range ch {
if data.err != nil && sendErr == nil {
sendErr = data.err
} else if len(data.value) > 0 {
t0 := time.Now()
err := writeBlock(w, data.bcoord, data.value)
if err != nil && sendErr == nil {
sendErr = err
}
timing.writeDone(t0)
}
wg.Done()
}
timedLog.Infof("labelmap %q specificblocks - finished sending %d blocks starting with %s", d.DataName(), numBlocks, startBlock)
}()
// iterate through each block, get data from store, and transcode based on request parameters
for i := 0; i < len(coordarray); i += 3 {
var bcoord dvid.ChunkPoint3d
if bcoord, err = strArrayToBCoord(coordarray[i : i+3]); err != nil {
return
}
if i == 0 {
startBlock = bcoord
}
wg.Add(1)
t0 := time.Now()
indexBeg := dvid.IndexZYX(bcoord)
keyBeg := NewBlockTKey(scale, &indexBeg)
var value []byte
value, err = store.Get(ctx, keyBeg)
timing.readDone(t0)
if err != nil {
ch <- blockSend{err: err}
return
}
if len(value) > 0 {
go func(bcoord dvid.ChunkPoint3d, value []byte) {
b := blockData{
bcoord: bcoord,
v: ctx.VersionID(),
data: value,
compression: compression,
supervoxels: supervoxels,
}
t0 := time.Now()
out, err := d.transcodeBlock(b)
timing.transcodeDone(t0)
ch <- blockSend{bcoord: bcoord, value: out, err: err}
}(bcoord, value)
} else {
ch <- blockSend{value: nil}
}
}
timedLog.Infof("labelmap %q specificblocks - launched concurrent reads of %d blocks starting with %s", d.DataName(), numBlocks, startBlock)
wg.Wait()
close(ch)
dvid.Infof("labelmap %q specificblocks - %d blocks starting with %s: %s\n", d.DataName(), numBlocks, startBlock, &timing)
return numBlocks, sendErr
}
// sendBlocksVolume writes a series of blocks covering the given block-aligned subvolume to a HTTP response.
func (d *Data) sendBlocksVolume(ctx *datastore.VersionedCtx, w http.ResponseWriter, supervoxels bool, scale uint8, subvol *dvid.Subvolume, compression string) error {
w.Header().Set("Content-type", "application/octet-stream")
switch compression {
case "", "lz4", "gzip", "blocks", "uncompressed":
default:
return fmt.Errorf(`compression must be "lz4" (default), "gzip", "blocks" or "uncompressed"`)
}
// convert x,y,z coordinates to block coordinates for this scale
blocksdims := subvol.Size().Div(d.BlockSize())
blocksoff := subvol.StartPoint().Div(d.BlockSize())
timedLog := dvid.NewTimeLog()
defer timedLog.Infof("SendBlocks %s, span x %d, span y %d, span z %d", blocksoff, blocksdims.Value(0), blocksdims.Value(1), blocksdims.Value(2))
numBlocks := int(blocksdims.Prod())
wg := new(sync.WaitGroup)
// launch goroutine that will stream blocks to client
ch := make(chan blockSend, numBlocks)
var sendErr error
go func() {
for data := range ch {
if data.err != nil && sendErr == nil {
sendErr = data.err
} else {
err := writeBlock(w, data.bcoord, data.value)
if err != nil && sendErr == nil {
sendErr = err
}
}
wg.Done()
}
}()
store, err := datastore.GetOrderedKeyValueDB(d)
if err != nil {
return fmt.Errorf("Data type labelmap had error initializing store: %v", err)
}
okv := store.(storage.BufferableOps)
// extract buffer interface
req, hasbuffer := okv.(storage.KeyValueRequester)
if hasbuffer {
okv = req.NewBuffer(ctx)
}
for ziter := int32(0); ziter < blocksdims.Value(2); ziter++ {
for yiter := int32(0); yiter < blocksdims.Value(1); yiter++ {
beginPoint := dvid.ChunkPoint3d{blocksoff.Value(0), blocksoff.Value(1) + yiter, blocksoff.Value(2) + ziter}
endPoint := dvid.ChunkPoint3d{blocksoff.Value(0) + blocksdims.Value(0) - 1, blocksoff.Value(1) + yiter, blocksoff.Value(2) + ziter}
indexBeg := dvid.IndexZYX(beginPoint)
sx, sy, sz := indexBeg.Unpack()
begTKey := NewBlockTKey(scale, &indexBeg)
indexEnd := dvid.IndexZYX(endPoint)
endTKey := NewBlockTKey(scale, &indexEnd)
// Send the entire range of key-value pairs to chunk processor
err = okv.ProcessRange(ctx, begTKey, endTKey, &storage.ChunkOp{}, func(c *storage.Chunk) error {
if c == nil || c.TKeyValue == nil {
return nil
}
kv := c.TKeyValue
if kv.V == nil {
return nil
}
// Determine which block this is.
_, indexZYX, err := DecodeBlockTKey(kv.K)
if err != nil {
return err
}
x, y, z := indexZYX.Unpack()
if z != sz || y != sy || x < sx || x >= sx+int32(blocksdims.Value(0)) {
return nil
}
b := blockData{
bcoord: dvid.ChunkPoint3d{x, y, z},
compression: compression,
supervoxels: supervoxels,
v: ctx.VersionID(),
data: kv.V,
}
wg.Add(1)
go func(b blockData) {
out, err := d.transcodeBlock(b)
ch <- blockSend{bcoord: b.bcoord, value: out, err: err}
}(b)
return nil
})
if err != nil {
return fmt.Errorf("unable to GET data %s: %v", ctx, err)
}
}
}
wg.Wait()
close(ch)
if hasbuffer {
// submit the entire buffer to the DB
err = okv.(storage.RequestBuffer).Flush()
if err != nil {
return fmt.Errorf("unable to GET data %s: %v", ctx, err)
}
}
return sendErr
}
// getSupervoxelBlock returns a compressed supervoxel Block of the given block coordinate.
func (d *Data) getSupervoxelBlock(v dvid.VersionID, bcoord dvid.ChunkPoint3d, scale uint8) (*labels.Block, error) | {
store, err := datastore.GetOrderedKeyValueDB(d)
if err != nil {
return nil, err
}
// Retrieve the block of labels
ctx := datastore.NewVersionedCtx(d, v)
index := dvid.IndexZYX(bcoord)
serialization, err := store.Get(ctx, NewBlockTKey(scale, &index))
if err != nil {
return nil, fmt.Errorf("error getting '%s' block for index %s", d.DataName(), bcoord)
}
if serialization == nil {
blockSize, ok := d.BlockSize().(dvid.Point3d)
if !ok {
return nil, fmt.Errorf("block size for data %q should be 3d, not: %s", d.DataName(), d.BlockSize())
}
return labels.MakeSolidBlock(0, blockSize), nil
} | identifier_body |
|
blocks.go | () string {
var readAvgT, transcodeAvgT, writeAvgT time.Duration
if bt.readN == 0 {
readAvgT = 0
} else {
readAvgT = bt.readT / time.Duration(bt.readN)
}
if bt.transcodeN == 0 {
transcodeAvgT = 0
} else {
transcodeAvgT = bt.transcodeT / time.Duration(bt.transcodeN)
}
if bt.readN == 0 {
writeAvgT = 0
} else {
writeAvgT = bt.writeT / time.Duration(bt.readN)
}
return fmt.Sprintf("read %s (%s), transcode %s (%s), write %s (%s)", bt.readT, readAvgT, bt.transcodeT, transcodeAvgT, bt.writeT, writeAvgT)
}
type blockData struct {
bcoord dvid.ChunkPoint3d
compression string
supervoxels bool
v dvid.VersionID
data []byte
}
// transcodes a block of data by doing any data modifications necessary to meet requested
// compression compared to stored compression as well as raw supervoxels versus mapped labels.
func (d *Data) transcodeBlock(b blockData) (out []byte, err error) {
formatIn, checksum := dvid.DecodeSerializationFormat(dvid.SerializationFormat(b.data[0]))
var start int
if checksum == dvid.CRC32 {
start = 5
} else {
start = 1
}
var outsize uint32
switch formatIn {
case dvid.LZ4:
outsize = binary.LittleEndian.Uint32(b.data[start : start+4])
out = b.data[start+4:]
if len(out) != int(outsize) {
err = fmt.Errorf("block %s was corrupted lz4: supposed size %d but had %d bytes", b.bcoord, outsize, len(out))
return
}
case dvid.Uncompressed, dvid.Gzip:
outsize = uint32(len(b.data[start:]))
out = b.data[start:]
default:
err = fmt.Errorf("labelmap data was stored in unknown compressed format: %s", formatIn)
return
}
var formatOut dvid.CompressionFormat
switch b.compression {
case "", "lz4":
formatOut = dvid.LZ4
case "blocks":
formatOut = formatIn
case "gzip":
formatOut = dvid.Gzip
case "uncompressed":
formatOut = dvid.Uncompressed
default:
err = fmt.Errorf("unknown compression %q requested for blocks", b.compression)
return
}
var doMapping bool
var mapping *VCache
if !b.supervoxels {
if mapping, err = getMapping(d, b.v); err != nil {
return
}
if mapping != nil && mapping.mapUsed {
doMapping = true
}
}
// Need to do uncompression/recompression if we are changing compression or mapping
var uncompressed, recompressed []byte
if formatIn != formatOut || b.compression == "gzip" || doMapping {
switch formatIn {
case dvid.LZ4:
uncompressed = make([]byte, outsize)
if err = lz4.Uncompress(out, uncompressed); err != nil {
return
}
case dvid.Uncompressed:
uncompressed = out
case dvid.Gzip:
gzipIn := bytes.NewBuffer(out)
var zr *gzip.Reader
zr, err = gzip.NewReader(gzipIn)
if err != nil {
return
}
uncompressed, err = ioutil.ReadAll(zr)
if err != nil {
return
}
zr.Close()
}
var block labels.Block
if err = block.UnmarshalBinary(uncompressed); err != nil {
err = fmt.Errorf("unable to deserialize label block %s: %v", b.bcoord, err)
return
}
if !b.supervoxels {
modifyBlockMapping(b.v, &block, mapping)
}
if b.compression == "blocks" { // send native DVID block compression with gzip
out, err = block.CompressGZIP()
if err != nil {
return nil, err
}
} else { // we are sending raw block data
uint64array, size := block.MakeLabelVolume()
expectedSize := d.BlockSize().(dvid.Point3d)
if !size.Equals(expectedSize) {
err = fmt.Errorf("deserialized label block size %s does not equal data %q block size %s", size, d.DataName(), expectedSize)
return
}
switch formatOut {
case dvid.LZ4:
recompressed = make([]byte, lz4.CompressBound(uint64array))
var size int
if size, err = lz4.Compress(uint64array, recompressed); err != nil {
return nil, err
}
outsize = uint32(size)
out = recompressed[:outsize]
case dvid.Uncompressed:
out = uint64array
case dvid.Gzip:
var gzipOut bytes.Buffer
zw := gzip.NewWriter(&gzipOut)
if _, err = zw.Write(uint64array); err != nil {
return nil, err
}
zw.Flush()
zw.Close()
out = gzipOut.Bytes()
}
}
}
return
}
// try to write a single block either by streaming (allows for termination) or by writing
// with a simplified pipeline compared to subvolumes larger than a block.
func (d *Data) writeBlockToHTTP(ctx *datastore.VersionedCtx, w http.ResponseWriter, subvol *dvid.Subvolume, compression string, supervoxels bool, scale uint8, roiname dvid.InstanceName) (done bool, err error) {
// Can't handle ROI for now.
if roiname != "" {
return
}
// Can only handle 3d requests.
blockSize, okBlockSize := d.BlockSize().(dvid.Point3d)
subvolSize, okSubvolSize := subvol.Size().(dvid.Point3d)
startPt, okStartPt := subvol.StartPoint().(dvid.Point3d)
if !okBlockSize || !okSubvolSize || !okStartPt {
return
}
// Can only handle single block for now.
if subvolSize != blockSize {
return
}
// Can only handle aligned block for now.
chunkPt, aligned := dvid.GetChunkPoint3d(startPt, blockSize)
if !aligned {
return
}
if compression != "" {
err = d.sendCompressedBlock(ctx, w, subvol, compression, chunkPt, scale, supervoxels)
} else {
err = d.streamRawBlock(ctx, w, chunkPt, scale, supervoxels)
}
if err != nil {
return
}
return true, nil
}
// send a single aligned block of data via HTTP.
func (d *Data) sendCompressedBlock(ctx *datastore.VersionedCtx, w http.ResponseWriter, subvol *dvid.Subvolume, compression string, chunkPt dvid.ChunkPoint3d, scale uint8, supervoxels bool) error {
bcoordStr := chunkPt.ToIZYXString()
block, err := d.getLabelBlock(ctx, scale, bcoordStr)
if err != nil {
return err
}
if block == nil {
return fmt.Errorf("unable to get label block %s", bcoordStr)
}
if !supervoxels {
vc, err := getMapping(d, ctx.VersionID())
if err != nil {
return err
}
modifyBlockMapping(ctx.VersionID(), block, vc)
}
data, _ := block.MakeLabelVolume()
if err := writeCompressedToHTTP(compression, data, subvol, w); err != nil {
return err
}
return nil
}
// writes a block of data as uncompressed ZYX uint64 to the writer in streaming fashion, allowing
// for possible termination / error at any point.
func (d *Data) streamRawBlock(ctx *datastore.VersionedCtx, w http.ResponseWriter, bcoord dvid.ChunkPoint3d, scale uint8, supervoxels bool) error {
bcoordStr := bcoord.ToIZYXString()
block, err := d.getLabelBlock(ctx, scale, bcoordStr)
if err != nil {
return err
}
if !supervoxels {
mapping, err := getMapping(d, ctx.VersionID())
if err != nil {
return err
}
modifyBlockMapping(ctx.VersionID(), block, mapping)
}
if err := block.WriteLabelVolume(w); err != nil {
return err
}
return nil
}
// returns nil block if no block is at the given block coordinate
func (d *Data) getLabelBlock(ctx *datastore.VersionedCtx, scale uint8, bcoord dvid.IZYXString) (*labels.Block, error) {
store, err := datastore.GetKeyValueDB(d)
if err != nil {
return nil, fmt.Errorf("labelmap getLabelBlock() had error initializing store: %v", err)
}
tk := NewBlockTKeyByCoord(scale | String | identifier_name |
|
blocks.go | fmt.Errorf("unknown compression %q requested for blocks", b.compression)
return
}
var doMapping bool
var mapping *VCache
if !b.supervoxels {
if mapping, err = getMapping(d, b.v); err != nil {
return
}
if mapping != nil && mapping.mapUsed {
doMapping = true
}
}
// Need to do uncompression/recompression if we are changing compression or mapping
var uncompressed, recompressed []byte
if formatIn != formatOut || b.compression == "gzip" || doMapping {
switch formatIn {
case dvid.LZ4:
uncompressed = make([]byte, outsize)
if err = lz4.Uncompress(out, uncompressed); err != nil {
return
}
case dvid.Uncompressed:
uncompressed = out
case dvid.Gzip:
gzipIn := bytes.NewBuffer(out)
var zr *gzip.Reader
zr, err = gzip.NewReader(gzipIn)
if err != nil {
return
}
uncompressed, err = ioutil.ReadAll(zr)
if err != nil {
return
}
zr.Close()
}
var block labels.Block
if err = block.UnmarshalBinary(uncompressed); err != nil {
err = fmt.Errorf("unable to deserialize label block %s: %v", b.bcoord, err)
return
}
if !b.supervoxels {
modifyBlockMapping(b.v, &block, mapping)
}
if b.compression == "blocks" { // send native DVID block compression with gzip
out, err = block.CompressGZIP()
if err != nil {
return nil, err
}
} else { // we are sending raw block data
uint64array, size := block.MakeLabelVolume()
expectedSize := d.BlockSize().(dvid.Point3d)
if !size.Equals(expectedSize) {
err = fmt.Errorf("deserialized label block size %s does not equal data %q block size %s", size, d.DataName(), expectedSize)
return
}
switch formatOut {
case dvid.LZ4:
recompressed = make([]byte, lz4.CompressBound(uint64array))
var size int
if size, err = lz4.Compress(uint64array, recompressed); err != nil {
return nil, err
}
outsize = uint32(size)
out = recompressed[:outsize]
case dvid.Uncompressed:
out = uint64array
case dvid.Gzip:
var gzipOut bytes.Buffer
zw := gzip.NewWriter(&gzipOut)
if _, err = zw.Write(uint64array); err != nil {
return nil, err
}
zw.Flush()
zw.Close()
out = gzipOut.Bytes()
}
}
}
return
}
// try to write a single block either by streaming (allows for termination) or by writing
// with a simplified pipeline compared to subvolumes larger than a block.
func (d *Data) writeBlockToHTTP(ctx *datastore.VersionedCtx, w http.ResponseWriter, subvol *dvid.Subvolume, compression string, supervoxels bool, scale uint8, roiname dvid.InstanceName) (done bool, err error) {
// Can't handle ROI for now.
if roiname != "" |
// Can only handle 3d requests.
blockSize, okBlockSize := d.BlockSize().(dvid.Point3d)
subvolSize, okSubvolSize := subvol.Size().(dvid.Point3d)
startPt, okStartPt := subvol.StartPoint().(dvid.Point3d)
if !okBlockSize || !okSubvolSize || !okStartPt {
return
}
// Can only handle single block for now.
if subvolSize != blockSize {
return
}
// Can only handle aligned block for now.
chunkPt, aligned := dvid.GetChunkPoint3d(startPt, blockSize)
if !aligned {
return
}
if compression != "" {
err = d.sendCompressedBlock(ctx, w, subvol, compression, chunkPt, scale, supervoxels)
} else {
err = d.streamRawBlock(ctx, w, chunkPt, scale, supervoxels)
}
if err != nil {
return
}
return true, nil
}
// send a single aligned block of data via HTTP.
func (d *Data) sendCompressedBlock(ctx *datastore.VersionedCtx, w http.ResponseWriter, subvol *dvid.Subvolume, compression string, chunkPt dvid.ChunkPoint3d, scale uint8, supervoxels bool) error {
bcoordStr := chunkPt.ToIZYXString()
block, err := d.getLabelBlock(ctx, scale, bcoordStr)
if err != nil {
return err
}
if block == nil {
return fmt.Errorf("unable to get label block %s", bcoordStr)
}
if !supervoxels {
vc, err := getMapping(d, ctx.VersionID())
if err != nil {
return err
}
modifyBlockMapping(ctx.VersionID(), block, vc)
}
data, _ := block.MakeLabelVolume()
if err := writeCompressedToHTTP(compression, data, subvol, w); err != nil {
return err
}
return nil
}
// writes a block of data as uncompressed ZYX uint64 to the writer in streaming fashion, allowing
// for possible termination / error at any point.
func (d *Data) streamRawBlock(ctx *datastore.VersionedCtx, w http.ResponseWriter, bcoord dvid.ChunkPoint3d, scale uint8, supervoxels bool) error {
bcoordStr := bcoord.ToIZYXString()
block, err := d.getLabelBlock(ctx, scale, bcoordStr)
if err != nil {
return err
}
if !supervoxels {
mapping, err := getMapping(d, ctx.VersionID())
if err != nil {
return err
}
modifyBlockMapping(ctx.VersionID(), block, mapping)
}
if err := block.WriteLabelVolume(w); err != nil {
return err
}
return nil
}
// returns nil block if no block is at the given block coordinate
func (d *Data) getLabelBlock(ctx *datastore.VersionedCtx, scale uint8, bcoord dvid.IZYXString) (*labels.Block, error) {
store, err := datastore.GetKeyValueDB(d)
if err != nil {
return nil, fmt.Errorf("labelmap getLabelBlock() had error initializing store: %v", err)
}
tk := NewBlockTKeyByCoord(scale, bcoord)
val, err := store.Get(ctx, tk)
if err != nil {
return nil, fmt.Errorf("error on GET of labelmap %q label block @ %s", d.DataName(), bcoord)
}
if val == nil {
return nil, nil
}
data, _, err := dvid.DeserializeData(val, true)
if err != nil {
return nil, fmt.Errorf("unable to deserialize label block in %q: %v", d.DataName(), err)
}
block := new(labels.Block)
if err := block.UnmarshalBinary(data); err != nil {
return nil, err
}
return block, nil
}
func (d *Data) getLabelPositionedBlock(ctx *datastore.VersionedCtx, scale uint8, bcoord dvid.IZYXString) (*labels.PositionedBlock, error) {
block, err := d.getLabelBlock(ctx, scale, bcoord)
if err != nil {
return nil, err
}
if block == nil {
return nil, nil
}
return &labels.PositionedBlock{Block: *block, BCoord: bcoord}, nil
}
func (d *Data) putLabelBlock(ctx *datastore.VersionedCtx, scale uint8, pblock *labels.PositionedBlock) error {
store, err := datastore.GetKeyValueDB(d)
if err != nil {
return fmt.Errorf("labelmap putLabelBlock() had error initializing store: %v", err)
}
tk := NewBlockTKeyByCoord(scale, pblock.BCoord)
data, err := pblock.MarshalBinary()
if err != nil {
return err
}
val, err := dvid.SerializeData(data, d.Compression(), d.Checksum())
if err != nil {
return fmt.Errorf("unable to serialize block %s in %q: %v", pblock.BCoord, d.DataName(), err)
}
return store.Put(ctx, tk, val)
}
type blockSend struct {
bcoord dvid.ChunkPoint3d
value []byte
err error
}
// convert a slice of 3 integer strings into a coordinate
func strArrayToBCoord(coordarray []string) (bcoord dvid.ChunkPoint3d, err error) {
var xloc, yloc, zloc int
if xloc, err = strconv.Atoi(coordarray[0]); err != nil {
return
}
if yloc, err = strconv.Atoi(coordarray[1]); err != nil {
return
}
if zloc, err = strconv.Atoi(coordarray[2]); err != nil {
return
}
return dvid.ChunkPoint3d{int | {
return
} | conditional_block |
blocks.go | = fmt.Errorf("unknown compression %q requested for blocks", b.compression)
return
}
var doMapping bool
var mapping *VCache
if !b.supervoxels {
if mapping, err = getMapping(d, b.v); err != nil {
return
}
if mapping != nil && mapping.mapUsed {
doMapping = true
}
}
// Need to do uncompression/recompression if we are changing compression or mapping
var uncompressed, recompressed []byte
if formatIn != formatOut || b.compression == "gzip" || doMapping {
switch formatIn {
case dvid.LZ4:
uncompressed = make([]byte, outsize)
if err = lz4.Uncompress(out, uncompressed); err != nil {
return
}
case dvid.Uncompressed:
uncompressed = out
case dvid.Gzip:
gzipIn := bytes.NewBuffer(out)
var zr *gzip.Reader
zr, err = gzip.NewReader(gzipIn)
if err != nil {
return
}
uncompressed, err = ioutil.ReadAll(zr)
if err != nil {
return
}
zr.Close()
}
var block labels.Block
if err = block.UnmarshalBinary(uncompressed); err != nil {
err = fmt.Errorf("unable to deserialize label block %s: %v", b.bcoord, err)
return
}
if !b.supervoxels {
modifyBlockMapping(b.v, &block, mapping)
}
if b.compression == "blocks" { // send native DVID block compression with gzip
out, err = block.CompressGZIP()
if err != nil {
return nil, err
}
} else { // we are sending raw block data
uint64array, size := block.MakeLabelVolume()
expectedSize := d.BlockSize().(dvid.Point3d)
if !size.Equals(expectedSize) {
err = fmt.Errorf("deserialized label block size %s does not equal data %q block size %s", size, d.DataName(), expectedSize)
return
}
switch formatOut {
case dvid.LZ4:
recompressed = make([]byte, lz4.CompressBound(uint64array))
var size int
if size, err = lz4.Compress(uint64array, recompressed); err != nil {
return nil, err
}
outsize = uint32(size)
out = recompressed[:outsize]
case dvid.Uncompressed:
out = uint64array
case dvid.Gzip:
var gzipOut bytes.Buffer
zw := gzip.NewWriter(&gzipOut)
if _, err = zw.Write(uint64array); err != nil {
return nil, err
}
zw.Flush()
zw.Close()
out = gzipOut.Bytes()
}
}
}
return
}
// try to write a single block either by streaming (allows for termination) or by writing
// with a simplified pipeline compared to subvolumes larger than a block.
func (d *Data) writeBlockToHTTP(ctx *datastore.VersionedCtx, w http.ResponseWriter, subvol *dvid.Subvolume, compression string, supervoxels bool, scale uint8, roiname dvid.InstanceName) (done bool, err error) {
// Can't handle ROI for now.
if roiname != "" {
return
}
// Can only handle 3d requests.
blockSize, okBlockSize := d.BlockSize().(dvid.Point3d)
subvolSize, okSubvolSize := subvol.Size().(dvid.Point3d)
startPt, okStartPt := subvol.StartPoint().(dvid.Point3d)
if !okBlockSize || !okSubvolSize || !okStartPt {
return
}
// Can only handle single block for now.
if subvolSize != blockSize {
return
}
// Can only handle aligned block for now.
chunkPt, aligned := dvid.GetChunkPoint3d(startPt, blockSize)
if !aligned {
return
}
if compression != "" {
err = d.sendCompressedBlock(ctx, w, subvol, compression, chunkPt, scale, supervoxels)
} else {
err = d.streamRawBlock(ctx, w, chunkPt, scale, supervoxels)
}
if err != nil {
return
}
return true, nil
}
// send a single aligned block of data via HTTP.
func (d *Data) sendCompressedBlock(ctx *datastore.VersionedCtx, w http.ResponseWriter, subvol *dvid.Subvolume, compression string, chunkPt dvid.ChunkPoint3d, scale uint8, supervoxels bool) error {
bcoordStr := chunkPt.ToIZYXString()
block, err := d.getLabelBlock(ctx, scale, bcoordStr)
if err != nil {
return err
}
if block == nil {
return fmt.Errorf("unable to get label block %s", bcoordStr)
}
if !supervoxels {
vc, err := getMapping(d, ctx.VersionID())
if err != nil {
return err
}
modifyBlockMapping(ctx.VersionID(), block, vc)
}
data, _ := block.MakeLabelVolume()
if err := writeCompressedToHTTP(compression, data, subvol, w); err != nil {
return err
}
return nil | bcoordStr := bcoord.ToIZYXString()
block, err := d.getLabelBlock(ctx, scale, bcoordStr)
if err != nil {
return err
}
if !supervoxels {
mapping, err := getMapping(d, ctx.VersionID())
if err != nil {
return err
}
modifyBlockMapping(ctx.VersionID(), block, mapping)
}
if err := block.WriteLabelVolume(w); err != nil {
return err
}
return nil
}
// returns nil block if no block is at the given block coordinate
func (d *Data) getLabelBlock(ctx *datastore.VersionedCtx, scale uint8, bcoord dvid.IZYXString) (*labels.Block, error) {
store, err := datastore.GetKeyValueDB(d)
if err != nil {
return nil, fmt.Errorf("labelmap getLabelBlock() had error initializing store: %v", err)
}
tk := NewBlockTKeyByCoord(scale, bcoord)
val, err := store.Get(ctx, tk)
if err != nil {
return nil, fmt.Errorf("error on GET of labelmap %q label block @ %s", d.DataName(), bcoord)
}
if val == nil {
return nil, nil
}
data, _, err := dvid.DeserializeData(val, true)
if err != nil {
return nil, fmt.Errorf("unable to deserialize label block in %q: %v", d.DataName(), err)
}
block := new(labels.Block)
if err := block.UnmarshalBinary(data); err != nil {
return nil, err
}
return block, nil
}
func (d *Data) getLabelPositionedBlock(ctx *datastore.VersionedCtx, scale uint8, bcoord dvid.IZYXString) (*labels.PositionedBlock, error) {
block, err := d.getLabelBlock(ctx, scale, bcoord)
if err != nil {
return nil, err
}
if block == nil {
return nil, nil
}
return &labels.PositionedBlock{Block: *block, BCoord: bcoord}, nil
}
func (d *Data) putLabelBlock(ctx *datastore.VersionedCtx, scale uint8, pblock *labels.PositionedBlock) error {
store, err := datastore.GetKeyValueDB(d)
if err != nil {
return fmt.Errorf("labelmap putLabelBlock() had error initializing store: %v", err)
}
tk := NewBlockTKeyByCoord(scale, pblock.BCoord)
data, err := pblock.MarshalBinary()
if err != nil {
return err
}
val, err := dvid.SerializeData(data, d.Compression(), d.Checksum())
if err != nil {
return fmt.Errorf("unable to serialize block %s in %q: %v", pblock.BCoord, d.DataName(), err)
}
return store.Put(ctx, tk, val)
}
type blockSend struct {
bcoord dvid.ChunkPoint3d
value []byte
err error
}
// convert a slice of 3 integer strings into a coordinate
func strArrayToBCoord(coordarray []string) (bcoord dvid.ChunkPoint3d, err error) {
var xloc, yloc, zloc int
if xloc, err = strconv.Atoi(coordarray[0]); err != nil {
return
}
if yloc, err = strconv.Atoi(coordarray[1]); err != nil {
return
}
if zloc, err = strconv.Atoi(coordarray[2]); err != nil {
return
}
return dvid.ChunkPoint3d{int32 | }
// writes a block of data as uncompressed ZYX uint64 to the writer in streaming fashion, allowing
// for possible termination / error at any point.
func (d *Data) streamRawBlock(ctx *datastore.VersionedCtx, w http.ResponseWriter, bcoord dvid.ChunkPoint3d, scale uint8, supervoxels bool) error { | random_line_split |
external_accounts.go | AndSave for that.
func (s *userExternalAccounts) LookupUserAndSave(ctx context.Context, spec extsvc.ExternalAccountSpec, data extsvc.ExternalAccountData) (userID int32, err error) {
if Mocks.ExternalAccounts.LookupUserAndSave != nil {
return Mocks.ExternalAccounts.LookupUserAndSave(spec, data)
}
err = dbconn.Global.QueryRowContext(ctx, `
UPDATE user_external_accounts SET auth_data=$5, account_data=$6, updated_at=now()
WHERE service_type=$1 AND service_id=$2 AND client_id=$3 AND account_id=$4 AND deleted_at IS NULL
RETURNING user_id
`, spec.ServiceType, spec.ServiceID, spec.ClientID, spec.AccountID, data.AuthData, data.AccountData).Scan(&userID)
if err == sql.ErrNoRows {
err = userExternalAccountNotFoundError{[]interface{}{spec}}
}
return userID, err
}
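// A minimal sign-in sketch (hypothetical caller: the concrete provider values
// and the package-level ExternalAccounts handle are illustrative assumptions,
// not guaranteed by this file; data is assumed to be populated by the auth
// provider):
//
//	spec := extsvc.ExternalAccountSpec{
//		ServiceType: "saml",
//		ServiceID:   "https://idp.example.com/",
//		ClientID:    "client-123",
//		AccountID:   "alice@idp.example.com",
//	}
//	userID, err := ExternalAccounts.LookupUserAndSave(ctx, spec, data)
//	if err != nil {
//		// A userExternalAccountNotFoundError means no matching, non-deleted
//		// row exists; callers typically fall back to CreateUserAndSave
//		// (signup) or AssociateUserAndSave (linking to a signed-in user).
//	}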
// AssociateUserAndSave is used for linking a new, additional external account with an existing
// Sourcegraph account.
//
// It creates a user external account and associates it with the specified user. If the external
// account already exists and is associated with:
//
// - the same user: it updates the data and returns a nil error; or
// - a different user: it performs no update and returns a non-nil error
func (s *userExternalAccounts) AssociateUserAndSave(ctx context.Context, userID int32, spec extsvc.ExternalAccountSpec, data extsvc.ExternalAccountData) (err error) {
if Mocks.ExternalAccounts.AssociateUserAndSave != nil {
return Mocks.ExternalAccounts.AssociateUserAndSave(userID, spec, data)
}
	// This "upsert" is a select followed by an insert or update, so a concurrent
	// request can race it and cause an ephemeral failure, but it can never leave
	// the data inconsistent. Wrap the whole sequence in a transaction.
tx, err := dbconn.Global.BeginTx(ctx, nil)
if err != nil {
return err
}
defer func() {
if err != nil {
rollErr := tx.Rollback()
if rollErr != nil {
err = multierror.Append(err, rollErr)
}
return
}
err = tx.Commit()
}()
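	// From here on, returning a non-nil err triggers the deferred rollback;
	// returning nil commits.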
// Find whether the account exists and, if so, which user ID the account is associated with.
var exists bool
var existingID, associatedUserID int32
err = tx.QueryRowContext(ctx, `
SELECT id, user_id FROM user_external_accounts
WHERE service_type=$1 AND service_id=$2 AND client_id=$3 AND account_id=$4 AND deleted_at IS NULL
`, spec.ServiceType, spec.ServiceID, spec.ClientID, spec.AccountID).Scan(&existingID, &associatedUserID)
if err != nil && err != sql.ErrNoRows {
return err
}
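	// sql.ErrNoRows only means the account doesn't exist yet; record that and
	// clear err so it isn't treated as a real failure by the deferred handler.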
exists = err != sql.ErrNoRows
err = nil
if exists && associatedUserID != userID {
// The account already exists and is associated with another user.
return fmt.Errorf("unable to change association of external account from user %d to user %d (delete the external account and then try again)", associatedUserID, userID)
}
if !exists {
// Create the external account (it doesn't yet exist).
return s.insert(ctx, tx, userID, spec, data)
}
// Update the external account (it exists).
res, err := tx.ExecContext(ctx, `
UPDATE user_external_accounts SET auth_data=$6, account_data=$7, updated_at=now()
WHERE service_type=$1 AND service_id=$2 AND client_id=$3 AND account_id=$4 AND user_id=$5 AND deleted_at IS NULL
`, spec.ServiceType, spec.ServiceID, spec.ClientID, spec.AccountID, userID, data.AuthData, data.AccountData)
if err != nil {
return err
}
nrows, err := res.RowsAffected()
if err != nil {
return err
}
if nrows == 0 {
return userExternalAccountNotFoundError{[]interface{}{existingID}}
}
return nil
}
// CreateUserAndSave is used to create a new Sourcegraph user account from an external account
// (e.g., "signup from SAML").
//
// It creates a new user and associates it with the specified external account. If the user to
// create already exists, it returns an error.
func (s *userExternalAccounts) CreateUserAndSave(ctx context.Context, newUser NewUser, spec extsvc.ExternalAccountSpec, data extsvc.ExternalAccountData) (createdUserID int32, err error) {
if Mocks.ExternalAccounts.CreateUserAndSave != nil {
return Mocks.ExternalAccounts.CreateUserAndSave(newUser, spec, data)
}
// Wrap in transaction.
tx, err := dbconn.Global.BeginTx(ctx, nil)
if err != nil {
return 0, err
}
defer func() {
if err != nil {
rollErr := tx.Rollback()
if rollErr != nil {
err = multierror.Append(err, rollErr)
}
return
}
err = tx.Commit()
}()
createdUser, err := Users.create(ctx, tx, newUser)
if err != nil {
return 0, err
}
err = s.insert(ctx, tx, createdUser.ID, spec, data)
return createdUser.ID, err
}
func (s *userExternalAccounts) insert(ctx context.Context, tx *sql.Tx, userID int32, spec extsvc.ExternalAccountSpec, data extsvc.ExternalAccountData) error {
_, err := tx.ExecContext(ctx, `
INSERT INTO user_external_accounts(user_id, service_type, service_id, client_id, account_id, auth_data, account_data)
VALUES($1, $2, $3, $4, $5, $6, $7)
`, userID, spec.ServiceType, spec.ServiceID, spec.ClientID, spec.AccountID, data.AuthData, data.AccountData)
return err
}
// Delete deletes a user external account.
func (*userExternalAccounts) Delete(ctx context.Context, id int32) error {
if Mocks.ExternalAccounts.Delete != nil |
res, err := dbconn.Global.ExecContext(ctx, "UPDATE user_external_accounts SET deleted_at=now() WHERE id=$1 AND deleted_at IS NULL", id)
if err != nil {
return err
}
nrows, err := res.RowsAffected()
if err != nil {
return err
}
if nrows == 0 {
return userExternalAccountNotFoundError{[]interface{}{id}}
}
return nil
}
// ExternalAccountsListOptions specifies the options for listing user external accounts.
type ExternalAccountsListOptions struct {
UserID int32
ServiceType, ServiceID, ClientID string
*LimitOffset
}
func (s *userExternalAccounts) List(ctx context.Context, opt ExternalAccountsListOptions) ([]*extsvc.ExternalAccount, error) {
if Mocks.ExternalAccounts.List != nil {
return Mocks.ExternalAccounts.List(opt)
}
conds := s.listSQL(opt)
return s.listBySQL(ctx, sqlf.Sprintf("WHERE %s ORDER BY id ASC %s", sqlf.Join(conds, "AND"), opt.LimitOffset.SQL()))
}
func (s *userExternalAccounts) Count(ctx context.Context, opt ExternalAccountsListOptions) (int, error) {
if Mocks.ExternalAccounts.Count != nil {
return Mocks.ExternalAccounts.Count(opt)
}
conds := s.listSQL(opt)
q := sqlf.Sprintf("SELECT COUNT(*) FROM user_external_accounts WHERE %s", sqlf.Join(conds, "AND"))
var count int
err := dbconn.Global.QueryRowContext(ctx, q.Query(sqlf.PostgresBindVar), q.Args()...).Scan(&count)
return count, err
}
// TmpMigrate implements the migration described in bg.MigrateExternalAccounts (which is the only
// func that should call this).
func (*userExternalAccounts) TmpMigrate(ctx context.Context, serviceType string) error {
// TEMP: Delete all external accounts associated with deleted users. Due to a bug in this
// migration code, it was possible for deleted users to be associated with non-deleted external
// accounts. This caused unexpected behavior in the UI (although did not pose a security
// threat). So, run this cleanup task upon each server startup.
if err := (userExternalAccounts{}).deleteForDeletedUsers(ctx); err != nil {
log15.Warn("Unable to clean up external user accounts.", "err", err)
}
const needsMigrationSentinel = "migration_in_progress"
// Avoid running UPDATE (which takes a lock) if it's not needed. The UPDATE only needs to run
// once ever, and we are guaranteed that the DB migration has run by the time we arrive here, so
// this is safe and not racy.
var needsMigration bool
if err := dbconn.Global.QueryRowContext(ctx, `SELECT EXISTS(SELECT 1 FROM user_external_accounts WHERE service_type=$1 AND deleted_at IS NULL)`, needsMigrationSentinel).Scan(&needsMigration); err != nil && err != sql.ErrNoRows {
return err
}
if !needsMigration {
return nil
}
var err error
if serviceType == "" {
_, err = dbconn.Global.ExecContext(ctx, `UPDATE user_external_accounts SET deleted_at=now(), service_type='not_configured_at_migration_time' WHERE service_type=$1`, needsMigrationSentinel)
} else {
_, err = dbconn.Global.Exec | {
return Mocks.ExternalAccounts.Delete(id)
} | conditional_block |
external_accounts.go | AndSave for that.
func (s *userExternalAccounts) LookupUserAndSave(ctx context.Context, spec extsvc.ExternalAccountSpec, data extsvc.ExternalAccountData) (userID int32, err error) {
if Mocks.ExternalAccounts.LookupUserAndSave != nil {
return Mocks.ExternalAccounts.LookupUserAndSave(spec, data)
}
err = dbconn.Global.QueryRowContext(ctx, `
UPDATE user_external_accounts SET auth_data=$5, account_data=$6, updated_at=now()
WHERE service_type=$1 AND service_id=$2 AND client_id=$3 AND account_id=$4 AND deleted_at IS NULL
RETURNING user_id
`, spec.ServiceType, spec.ServiceID, spec.ClientID, spec.AccountID, data.AuthData, data.AccountData).Scan(&userID)
if err == sql.ErrNoRows {
err = userExternalAccountNotFoundError{[]interface{}{spec}}
}
return userID, err
}
// AssociateUserAndSave is used for linking a new, additional external account with an existing
// Sourcegraph account.
//
// It creates a user external account and associates it with the specified user. If the external
// account already exists and is associated with:
//
// - the same user: it updates the data and returns a nil error; or
// - a different user: it performs no update and returns a non-nil error
func (s *userExternalAccounts) AssociateUserAndSave(ctx context.Context, userID int32, spec extsvc.ExternalAccountSpec, data extsvc.ExternalAccountData) (err error) {
if Mocks.ExternalAccounts.AssociateUserAndSave != nil {
return Mocks.ExternalAccounts.AssociateUserAndSave(userID, spec, data)
}
// This "upsert" may cause us to return an ephemeral failure due to a race condition, but it
// won't result in inconsistent data. Wrap in transaction.
tx, err := dbconn.Global.BeginTx(ctx, nil)
if err != nil {
return err
}
defer func() {
if err != nil {
rollErr := tx.Rollback()
if rollErr != nil {
err = multierror.Append(err, rollErr)
}
return
}
err = tx.Commit()
}()
// Find whether the account exists and, if so, which user ID the account is associated with.
var exists bool
var existingID, associatedUserID int32
err = tx.QueryRowContext(ctx, `
SELECT id, user_id FROM user_external_accounts
WHERE service_type=$1 AND service_id=$2 AND client_id=$3 AND account_id=$4 AND deleted_at IS NULL
`, spec.ServiceType, spec.ServiceID, spec.ClientID, spec.AccountID).Scan(&existingID, &associatedUserID)
if err != nil && err != sql.ErrNoRows {
return err
}
exists = err != sql.ErrNoRows
err = nil
if exists && associatedUserID != userID {
// The account already exists and is associated with another user.
return fmt.Errorf("unable to change association of external account from user %d to user %d (delete the external account and then try again)", associatedUserID, userID)
}
if !exists {
// Create the external account (it doesn't yet exist).
return s.insert(ctx, tx, userID, spec, data)
}
// Update the external account (it exists).
res, err := tx.ExecContext(ctx, `
UPDATE user_external_accounts SET auth_data=$6, account_data=$7, updated_at=now()
WHERE service_type=$1 AND service_id=$2 AND client_id=$3 AND account_id=$4 AND user_id=$5 AND deleted_at IS NULL
`, spec.ServiceType, spec.ServiceID, spec.ClientID, spec.AccountID, userID, data.AuthData, data.AccountData)
if err != nil {
return err
}
nrows, err := res.RowsAffected()
if err != nil {
return err
}
if nrows == 0 {
return userExternalAccountNotFoundError{[]interface{}{existingID}}
}
return nil
}
// CreateUserAndSave is used to create a new Sourcegraph user account from an external account
// (e.g., "signup from SAML").
//
// It creates a new user and associates it with the specified external account. If the user to
// create already exists, it returns an error.
func (s *userExternalAccounts) CreateUserAndSave(ctx context.Context, newUser NewUser, spec extsvc.ExternalAccountSpec, data extsvc.ExternalAccountData) (createdUserID int32, err error) {
if Mocks.ExternalAccounts.CreateUserAndSave != nil {
return Mocks.ExternalAccounts.CreateUserAndSave(newUser, spec, data)
}
// Wrap in transaction.
tx, err := dbconn.Global.BeginTx(ctx, nil)
if err != nil {
return 0, err
}
defer func() {
if err != nil {
rollErr := tx.Rollback()
if rollErr != nil {
err = multierror.Append(err, rollErr)
}
return
}
err = tx.Commit()
}()
createdUser, err := Users.create(ctx, tx, newUser)
if err != nil {
return 0, err
}
err = s.insert(ctx, tx, createdUser.ID, spec, data)
return createdUser.ID, err
}
func (s *userExternalAccounts) insert(ctx context.Context, tx *sql.Tx, userID int32, spec extsvc.ExternalAccountSpec, data extsvc.ExternalAccountData) error {
_, err := tx.ExecContext(ctx, `
INSERT INTO user_external_accounts(user_id, service_type, service_id, client_id, account_id, auth_data, account_data)
VALUES($1, $2, $3, $4, $5, $6, $7)
`, userID, spec.ServiceType, spec.ServiceID, spec.ClientID, spec.AccountID, data.AuthData, data.AccountData)
return err
}
// Delete deletes a user external account.
func (*userExternalAccounts) Delete(ctx context.Context, id int32) error {
if Mocks.ExternalAccounts.Delete != nil {
return Mocks.ExternalAccounts.Delete(id)
}
res, err := dbconn.Global.ExecContext(ctx, "UPDATE user_external_accounts SET deleted_at=now() WHERE id=$1 AND deleted_at IS NULL", id)
if err != nil {
return err
}
nrows, err := res.RowsAffected()
if err != nil {
return err
}
if nrows == 0 {
return userExternalAccountNotFoundError{[]interface{}{id}}
}
return nil
}
// ExternalAccountsListOptions specifies the options for listing user external accounts.
type ExternalAccountsListOptions struct {
UserID int32
ServiceType, ServiceID, ClientID string
*LimitOffset
}
func (s *userExternalAccounts) List(ctx context.Context, opt ExternalAccountsListOptions) ([]*extsvc.ExternalAccount, error) {
if Mocks.ExternalAccounts.List != nil {
return Mocks.ExternalAccounts.List(opt)
}
conds := s.listSQL(opt)
return s.listBySQL(ctx, sqlf.Sprintf("WHERE %s ORDER BY id ASC %s", sqlf.Join(conds, "AND"), opt.LimitOffset.SQL()))
}
func (s *userExternalAccounts) Count(ctx context.Context, opt ExternalAccountsListOptions) (int, error) {
if Mocks.ExternalAccounts.Count != nil {
return Mocks.ExternalAccounts.Count(opt)
}
conds := s.listSQL(opt)
q := sqlf.Sprintf("SELECT COUNT(*) FROM user_external_accounts WHERE %s", sqlf.Join(conds, "AND"))
var count int
err := dbconn.Global.QueryRowContext(ctx, q.Query(sqlf.PostgresBindVar), q.Args()...).Scan(&count)
return count, err
}
// TmpMigrate implements the migration described in bg.MigrateExternalAccounts (which is the only
// func that should call this).
func (*userExternalAccounts) | (ctx context.Context, serviceType string) error {
// TEMP: Delete all external accounts associated with deleted users. Due to a bug in this
// migration code, it was possible for deleted users to be associated with non-deleted external
// accounts. This caused unexpected behavior in the UI (although did not pose a security
// threat). So, run this cleanup task upon each server startup.
if err := (userExternalAccounts{}).deleteForDeletedUsers(ctx); err != nil {
log15.Warn("Unable to clean up external user accounts.", "err", err)
}
const needsMigrationSentinel = "migration_in_progress"
// Avoid running UPDATE (which takes a lock) if it's not needed. The UPDATE only needs to run
// once ever, and we are guaranteed that the DB migration has run by the time we arrive here, so
// this is safe and not racy.
var needsMigration bool
if err := dbconn.Global.QueryRowContext(ctx, `SELECT EXISTS(SELECT 1 FROM user_external_accounts WHERE service_type=$1 AND deleted_at IS NULL)`, needsMigrationSentinel).Scan(&needsMigration); err != nil && err != sql.ErrNoRows {
return err
}
if !needsMigration {
return nil
}
var err error
if serviceType == "" {
_, err = dbconn.Global.ExecContext(ctx, `UPDATE user_external_accounts SET deleted_at=now(), service_type='not_configured_at_migration_time' WHERE service_type=$1`, needsMigrationSentinel)
} else {
_, err = dbconn.Global.ExecContext(ctx | TmpMigrate | identifier_name |
external_accounts.go | // It creates a user external account and associates it with the specified user. If the external
// account already exists and is associated with:
//
// - the same user: it updates the data and returns a nil error; or
// - a different user: it performs no update and returns a non-nil error
func (s *userExternalAccounts) AssociateUserAndSave(ctx context.Context, userID int32, spec extsvc.ExternalAccountSpec, data extsvc.ExternalAccountData) (err error) {
if Mocks.ExternalAccounts.AssociateUserAndSave != nil {
return Mocks.ExternalAccounts.AssociateUserAndSave(userID, spec, data)
}
// This "upsert" may cause us to return an ephemeral failure due to a race condition, but it
// won't result in inconsistent data. Wrap in transaction.
tx, err := dbconn.Global.BeginTx(ctx, nil)
if err != nil {
return err
}
defer func() {
if err != nil {
rollErr := tx.Rollback()
if rollErr != nil {
err = multierror.Append(err, rollErr)
}
return
}
err = tx.Commit()
}()
// Find whether the account exists and, if so, which user ID the account is associated with.
var exists bool
var existingID, associatedUserID int32
err = tx.QueryRowContext(ctx, `
SELECT id, user_id FROM user_external_accounts
WHERE service_type=$1 AND service_id=$2 AND client_id=$3 AND account_id=$4 AND deleted_at IS NULL
`, spec.ServiceType, spec.ServiceID, spec.ClientID, spec.AccountID).Scan(&existingID, &associatedUserID)
if err != nil && err != sql.ErrNoRows {
return err
}
exists = err != sql.ErrNoRows
err = nil
if exists && associatedUserID != userID {
// The account already exists and is associated with another user.
return fmt.Errorf("unable to change association of external account from user %d to user %d (delete the external account and then try again)", associatedUserID, userID)
}
if !exists {
// Create the external account (it doesn't yet exist).
return s.insert(ctx, tx, userID, spec, data)
}
// Update the external account (it exists).
res, err := tx.ExecContext(ctx, `
UPDATE user_external_accounts SET auth_data=$6, account_data=$7, updated_at=now()
WHERE service_type=$1 AND service_id=$2 AND client_id=$3 AND account_id=$4 AND user_id=$5 AND deleted_at IS NULL
`, spec.ServiceType, spec.ServiceID, spec.ClientID, spec.AccountID, userID, data.AuthData, data.AccountData)
if err != nil {
return err
}
nrows, err := res.RowsAffected()
if err != nil {
return err
}
if nrows == 0 {
return userExternalAccountNotFoundError{[]interface{}{existingID}}
}
return nil
}
// CreateUserAndSave is used to create a new Sourcegraph user account from an external account
// (e.g., "signup from SAML").
//
// It creates a new user and associates it with the specified external account. If the user to
// create already exists, it returns an error.
func (s *userExternalAccounts) CreateUserAndSave(ctx context.Context, newUser NewUser, spec extsvc.ExternalAccountSpec, data extsvc.ExternalAccountData) (createdUserID int32, err error) {
if Mocks.ExternalAccounts.CreateUserAndSave != nil {
return Mocks.ExternalAccounts.CreateUserAndSave(newUser, spec, data)
}
// Wrap in transaction.
tx, err := dbconn.Global.BeginTx(ctx, nil)
if err != nil {
return 0, err
}
defer func() {
if err != nil {
rollErr := tx.Rollback()
if rollErr != nil {
err = multierror.Append(err, rollErr)
}
return
}
err = tx.Commit()
}()
createdUser, err := Users.create(ctx, tx, newUser)
if err != nil {
return 0, err
}
err = s.insert(ctx, tx, createdUser.ID, spec, data)
return createdUser.ID, err
}
func (s *userExternalAccounts) insert(ctx context.Context, tx *sql.Tx, userID int32, spec extsvc.ExternalAccountSpec, data extsvc.ExternalAccountData) error {
_, err := tx.ExecContext(ctx, `
INSERT INTO user_external_accounts(user_id, service_type, service_id, client_id, account_id, auth_data, account_data)
VALUES($1, $2, $3, $4, $5, $6, $7)
`, userID, spec.ServiceType, spec.ServiceID, spec.ClientID, spec.AccountID, data.AuthData, data.AccountData)
return err
}
// Delete deletes a user external account.
func (*userExternalAccounts) Delete(ctx context.Context, id int32) error {
if Mocks.ExternalAccounts.Delete != nil {
return Mocks.ExternalAccounts.Delete(id)
}
res, err := dbconn.Global.ExecContext(ctx, "UPDATE user_external_accounts SET deleted_at=now() WHERE id=$1 AND deleted_at IS NULL", id)
if err != nil {
return err
}
nrows, err := res.RowsAffected()
if err != nil {
return err
}
if nrows == 0 {
return userExternalAccountNotFoundError{[]interface{}{id}}
}
return nil
}
// ExternalAccountsListOptions specifies the options for listing user external accounts.
type ExternalAccountsListOptions struct {
UserID int32
ServiceType, ServiceID, ClientID string
*LimitOffset
}
func (s *userExternalAccounts) List(ctx context.Context, opt ExternalAccountsListOptions) ([]*extsvc.ExternalAccount, error) {
if Mocks.ExternalAccounts.List != nil {
return Mocks.ExternalAccounts.List(opt)
}
conds := s.listSQL(opt)
return s.listBySQL(ctx, sqlf.Sprintf("WHERE %s ORDER BY id ASC %s", sqlf.Join(conds, "AND"), opt.LimitOffset.SQL()))
}
func (s *userExternalAccounts) Count(ctx context.Context, opt ExternalAccountsListOptions) (int, error) {
if Mocks.ExternalAccounts.Count != nil {
return Mocks.ExternalAccounts.Count(opt)
}
conds := s.listSQL(opt)
q := sqlf.Sprintf("SELECT COUNT(*) FROM user_external_accounts WHERE %s", sqlf.Join(conds, "AND"))
var count int
err := dbconn.Global.QueryRowContext(ctx, q.Query(sqlf.PostgresBindVar), q.Args()...).Scan(&count)
return count, err
}
// TmpMigrate implements the migration described in bg.MigrateExternalAccounts (which is the only
// func that should call this).
func (*userExternalAccounts) TmpMigrate(ctx context.Context, serviceType string) error {
// TEMP: Delete all external accounts associated with deleted users. Due to a bug in this
// migration code, it was possible for deleted users to be associated with non-deleted external
// accounts. This caused unexpected behavior in the UI (although did not pose a security
// threat). So, run this cleanup task upon each server startup.
if err := (userExternalAccounts{}).deleteForDeletedUsers(ctx); err != nil {
log15.Warn("Unable to clean up external user accounts.", "err", err)
}
const needsMigrationSentinel = "migration_in_progress"
// Avoid running UPDATE (which takes a lock) if it's not needed. The UPDATE only needs to run
// once ever, and we are guaranteed that the DB migration has run by the time we arrive here, so
// this is safe and not racy.
var needsMigration bool
if err := dbconn.Global.QueryRowContext(ctx, `SELECT EXISTS(SELECT 1 FROM user_external_accounts WHERE service_type=$1 AND deleted_at IS NULL)`, needsMigrationSentinel).Scan(&needsMigration); err != nil && err != sql.ErrNoRows {
return err
}
if !needsMigration {
return nil
}
var err error
if serviceType == "" {
_, err = dbconn.Global.ExecContext(ctx, `UPDATE user_external_accounts SET deleted_at=now(), service_type='not_configured_at_migration_time' WHERE service_type=$1`, needsMigrationSentinel)
} else {
_, err = dbconn.Global.ExecContext(ctx, `UPDATE user_external_accounts SET service_type=$2, account_id=SUBSTR(account_id, CHAR_LENGTH(service_id)+2) WHERE service_type=$1 AND service_id!='override'`, needsMigrationSentinel, serviceType)
if err == nil {
_, err = dbconn.Global.ExecContext(ctx, `UPDATE user_external_accounts SET service_type='override', service_id='' WHERE service_type=$1 AND service_id='override'`, needsMigrationSentinel)
}
}
return err
}
func (userExternalAccounts) deleteForDeletedUsers(ctx context.Context) error {
_, err := dbconn.Global.ExecContext(ctx, `UPDATE user_external_accounts SET deleted_at=now() FROM users WHERE user_external_accounts.user_id=users.id AND users.deleted_at IS NOT NULL AND user_external_accounts.deleted_at IS NULL`)
return err
}
func (s *userExternalAccounts) getBySQL(ctx context.Context, querySuffix *sqlf.Query) (*extsvc.ExternalAccount, error) {
results, err := s.listBySQL(ctx, querySuffix)
if err != nil { | random_line_split |
||
external_accounts.go | returns a nil error; or
// - a different user: it performs no update and returns a non-nil error
func (s *userExternalAccounts) AssociateUserAndSave(ctx context.Context, userID int32, spec extsvc.ExternalAccountSpec, data extsvc.ExternalAccountData) (err error) {
if Mocks.ExternalAccounts.AssociateUserAndSave != nil {
return Mocks.ExternalAccounts.AssociateUserAndSave(userID, spec, data)
}
// This "upsert" may cause us to return an ephemeral failure due to a race condition, but it
// won't result in inconsistent data. Wrap in transaction.
tx, err := dbconn.Global.BeginTx(ctx, nil)
if err != nil {
return err
}
defer func() {
if err != nil {
rollErr := tx.Rollback()
if rollErr != nil {
err = multierror.Append(err, rollErr)
}
return
}
err = tx.Commit()
}()
// Find whether the account exists and, if so, which user ID the account is associated with.
var exists bool
var existingID, associatedUserID int32
err = tx.QueryRowContext(ctx, `
SELECT id, user_id FROM user_external_accounts
WHERE service_type=$1 AND service_id=$2 AND client_id=$3 AND account_id=$4 AND deleted_at IS NULL
`, spec.ServiceType, spec.ServiceID, spec.ClientID, spec.AccountID).Scan(&existingID, &associatedUserID)
if err != nil && err != sql.ErrNoRows {
return err
}
exists = err != sql.ErrNoRows
err = nil
if exists && associatedUserID != userID {
// The account already exists and is associated with another user.
return fmt.Errorf("unable to change association of external account from user %d to user %d (delete the external account and then try again)", associatedUserID, userID)
}
if !exists {
// Create the external account (it doesn't yet exist).
return s.insert(ctx, tx, userID, spec, data)
}
// Update the external account (it exists).
res, err := tx.ExecContext(ctx, `
UPDATE user_external_accounts SET auth_data=$6, account_data=$7, updated_at=now()
WHERE service_type=$1 AND service_id=$2 AND client_id=$3 AND account_id=$4 AND user_id=$5 AND deleted_at IS NULL
`, spec.ServiceType, spec.ServiceID, spec.ClientID, spec.AccountID, userID, data.AuthData, data.AccountData)
if err != nil {
return err
}
nrows, err := res.RowsAffected()
if err != nil {
return err
}
if nrows == 0 {
return userExternalAccountNotFoundError{[]interface{}{existingID}}
}
return nil
}
// CreateUserAndSave is used to create a new Sourcegraph user account from an external account
// (e.g., "signup from SAML").
//
// It creates a new user and associates it with the specified external account. If the user to
// create already exists, it returns an error.
func (s *userExternalAccounts) CreateUserAndSave(ctx context.Context, newUser NewUser, spec extsvc.ExternalAccountSpec, data extsvc.ExternalAccountData) (createdUserID int32, err error) {
if Mocks.ExternalAccounts.CreateUserAndSave != nil {
return Mocks.ExternalAccounts.CreateUserAndSave(newUser, spec, data)
}
// Wrap in transaction.
tx, err := dbconn.Global.BeginTx(ctx, nil)
if err != nil {
return 0, err
}
defer func() {
if err != nil {
rollErr := tx.Rollback()
if rollErr != nil {
err = multierror.Append(err, rollErr)
}
return
}
err = tx.Commit()
}()
createdUser, err := Users.create(ctx, tx, newUser)
if err != nil {
return 0, err
}
err = s.insert(ctx, tx, createdUser.ID, spec, data)
return createdUser.ID, err
}
func (s *userExternalAccounts) insert(ctx context.Context, tx *sql.Tx, userID int32, spec extsvc.ExternalAccountSpec, data extsvc.ExternalAccountData) error {
_, err := tx.ExecContext(ctx, `
INSERT INTO user_external_accounts(user_id, service_type, service_id, client_id, account_id, auth_data, account_data)
VALUES($1, $2, $3, $4, $5, $6, $7)
`, userID, spec.ServiceType, spec.ServiceID, spec.ClientID, spec.AccountID, data.AuthData, data.AccountData)
return err
}
// Delete deletes a user external account.
func (*userExternalAccounts) Delete(ctx context.Context, id int32) error {
if Mocks.ExternalAccounts.Delete != nil {
return Mocks.ExternalAccounts.Delete(id)
}
res, err := dbconn.Global.ExecContext(ctx, "UPDATE user_external_accounts SET deleted_at=now() WHERE id=$1 AND deleted_at IS NULL", id)
if err != nil {
return err
}
nrows, err := res.RowsAffected()
if err != nil {
return err
}
if nrows == 0 {
return userExternalAccountNotFoundError{[]interface{}{id}}
}
return nil
}
// ExternalAccountsListOptions specifies the options for listing user external accounts.
type ExternalAccountsListOptions struct {
UserID int32
ServiceType, ServiceID, ClientID string
*LimitOffset
}
func (s *userExternalAccounts) List(ctx context.Context, opt ExternalAccountsListOptions) ([]*extsvc.ExternalAccount, error) {
if Mocks.ExternalAccounts.List != nil {
return Mocks.ExternalAccounts.List(opt)
}
conds := s.listSQL(opt)
return s.listBySQL(ctx, sqlf.Sprintf("WHERE %s ORDER BY id ASC %s", sqlf.Join(conds, "AND"), opt.LimitOffset.SQL()))
}
func (s *userExternalAccounts) Count(ctx context.Context, opt ExternalAccountsListOptions) (int, error) {
if Mocks.ExternalAccounts.Count != nil {
return Mocks.ExternalAccounts.Count(opt)
}
conds := s.listSQL(opt)
q := sqlf.Sprintf("SELECT COUNT(*) FROM user_external_accounts WHERE %s", sqlf.Join(conds, "AND"))
var count int
err := dbconn.Global.QueryRowContext(ctx, q.Query(sqlf.PostgresBindVar), q.Args()...).Scan(&count)
return count, err
}
// TmpMigrate implements the migration described in bg.MigrateExternalAccounts (which is the only
// func that should call this).
func (*userExternalAccounts) TmpMigrate(ctx context.Context, serviceType string) error {
// TEMP: Delete all external accounts associated with deleted users. Due to a bug in this
// migration code, it was possible for deleted users to be associated with non-deleted external
// accounts. This caused unexpected behavior in the UI (although did not pose a security
// threat). So, run this cleanup task upon each server startup.
if err := (userExternalAccounts{}).deleteForDeletedUsers(ctx); err != nil {
log15.Warn("Unable to clean up external user accounts.", "err", err)
}
const needsMigrationSentinel = "migration_in_progress"
// Avoid running UPDATE (which takes a lock) if it's not needed. The UPDATE only needs to run
// once ever, and we are guaranteed that the DB migration has run by the time we arrive here, so
// this is safe and not racy.
var needsMigration bool
if err := dbconn.Global.QueryRowContext(ctx, `SELECT EXISTS(SELECT 1 FROM user_external_accounts WHERE service_type=$1 AND deleted_at IS NULL)`, needsMigrationSentinel).Scan(&needsMigration); err != nil && err != sql.ErrNoRows {
return err
}
if !needsMigration {
return nil
}
var err error
if serviceType == "" {
_, err = dbconn.Global.ExecContext(ctx, `UPDATE user_external_accounts SET deleted_at=now(), service_type='not_configured_at_migration_time' WHERE service_type=$1`, needsMigrationSentinel)
} else {
_, err = dbconn.Global.ExecContext(ctx, `UPDATE user_external_accounts SET service_type=$2, account_id=SUBSTR(account_id, CHAR_LENGTH(service_id)+2) WHERE service_type=$1 AND service_id!='override'`, needsMigrationSentinel, serviceType)
if err == nil {
_, err = dbconn.Global.ExecContext(ctx, `UPDATE user_external_accounts SET service_type='override', service_id='' WHERE service_type=$1 AND service_id='override'`, needsMigrationSentinel)
}
}
return err
}
func (userExternalAccounts) deleteForDeletedUsers(ctx context.Context) error {
_, err := dbconn.Global.ExecContext(ctx, `UPDATE user_external_accounts SET deleted_at=now() FROM users WHERE user_external_accounts.user_id=users.id AND users.deleted_at IS NOT NULL AND user_external_accounts.deleted_at IS NULL`)
return err
}
func (s *userExternalAccounts) getBySQL(ctx context.Context, querySuffix *sqlf.Query) (*extsvc.ExternalAccount, error) | {
results, err := s.listBySQL(ctx, querySuffix)
if err != nil {
return nil, err
}
if len(results) != 1 {
return nil, userExternalAccountNotFoundError{querySuffix.Args()}
}
return results[0], nil
} | identifier_body |
|
smt.rs | 3.0-rc1/meta-contract-generator";
const META_VALIDATOR_SCRIPT_TYPE_HASH: [u8; 32] = [1u8; 32];
// sudt contract
const SUDT_GENERATOR_PATH: &str =
"../../crates/builtin-binaries/builtin/gwos-v1.3.0-rc1/sudt-generator";
const SUDT_VALIDATOR_SCRIPT_TYPE_HASH: [u8; 32] = [2u8; 32];
// always success lock
const ALWAYS_SUCCESS_LOCK_HASH: [u8; 32] = [3u8; 32];
// rollup type hash
const ROLLUP_TYPE_HASH: [u8; 32] = [4u8; 32];
const CKB_BALANCE: u128 = 100_000_000;
criterion_group! {
name = smt;
config = Criterion::default()
.with_profiler(PProfProfiler::new(100, Output::Flamegraph(None)));
targets = bench_ckb_transfer
}
pub fn | (c: &mut Criterion) {
let config = StoreConfig {
path: "./smt_data/db".parse().unwrap(),
options_file: Some("./smt_data/db.toml".parse().unwrap()),
cache_size: Some(1073741824),
};
let store = Store::open(&config, COLUMNS).unwrap();
let ee = BenchExecutionEnvironment::new_with_accounts(store, 7000);
let mut group = c.benchmark_group("ckb_transfer");
for txs in (500..=5000).step_by(500) {
group.sample_size(10);
group.throughput(Throughput::Elements(txs));
group.bench_with_input(BenchmarkId::from_parameter(txs), &txs, |b, txs| {
b.iter(|| {
ee.accounts_transfer(7000, *txs as usize);
});
});
}
group.finish();
}
#[allow(dead_code)]
struct Account {
id: u32,
}
impl Account {
fn build_script(n: u32) -> (Script, RegistryAddress) {
let mut addr = [0u8; 20];
addr[..4].copy_from_slice(&n.to_le_bytes());
let mut args = vec![42u8; 32];
args.extend(&addr);
let script = Script::new_builder()
.code_hash(ALWAYS_SUCCESS_LOCK_HASH.pack())
.hash_type(ScriptHashType::Type.into())
.args(args.pack())
.build();
let addr = RegistryAddress::new(ETH_REGISTRY_ACCOUNT_ID, addr.to_vec());
(script, addr)
}
}
struct BenchChain;
impl ChainView for BenchChain {
fn get_block_hash_by_number(&self, _: u64) -> Result<Option<H256>> {
unreachable!("bench chain store")
}
}
struct BenchExecutionEnvironment {
generator: Generator,
chain: BenchChain,
mem_pool_state: MemPoolState,
}
impl BenchExecutionEnvironment {
fn new_with_accounts(store: Store, accounts: u32) -> Self {
let genesis_config = GenesisConfig {
meta_contract_validator_type_hash: META_VALIDATOR_SCRIPT_TYPE_HASH.into(),
rollup_type_hash: ROLLUP_TYPE_HASH.into(),
rollup_config: RollupConfig::new_builder()
.l2_sudt_validator_script_type_hash(SUDT_VALIDATOR_SCRIPT_TYPE_HASH.pack())
.allowed_eoa_type_hashes(
vec![AllowedTypeHash::new_builder()
.hash(ALWAYS_SUCCESS_LOCK_HASH.pack())
.type_(AllowedEoaType::Eth.into())
.build()]
.pack(),
)
.build()
.into(),
..Default::default()
};
let rollup_context = RollupContext {
rollup_config: genesis_config.rollup_config.clone().into(),
rollup_script_hash: ROLLUP_TYPE_HASH,
..Default::default()
};
let backend_manage = {
let configs = vec![
BackendConfig {
generator: Resource::file_system(META_GENERATOR_PATH.into()),
generator_checksum: file_checksum(&META_GENERATOR_PATH).unwrap().into(),
validator_script_type_hash: META_VALIDATOR_SCRIPT_TYPE_HASH.into(),
backend_type: gw_config::BackendType::Meta,
},
BackendConfig {
generator: Resource::file_system(SUDT_GENERATOR_PATH.into()),
generator_checksum: file_checksum(&SUDT_GENERATOR_PATH).unwrap().into(),
validator_script_type_hash: SUDT_VALIDATOR_SCRIPT_TYPE_HASH.into(),
backend_type: gw_config::BackendType::Sudt,
},
];
BackendManage::from_config(vec![BackendForkConfig {
sudt_proxy: Default::default(),
fork_height: 0,
backends: configs,
}])
.expect("bench backend")
};
let account_lock_manage = {
let mut manage = AccountLockManage::default();
manage.register_lock_algorithm(ALWAYS_SUCCESS_LOCK_HASH, Arc::new(AlwaysSuccess));
manage
};
let generator = Generator::new(
backend_manage,
account_lock_manage,
rollup_context,
Default::default(),
);
Self::init_genesis(&store, &genesis_config, accounts);
let mem_pool_state = MemPoolState::new(
MemStateDB::from_store(store.get_snapshot()).expect("mem state db"),
true,
);
BenchExecutionEnvironment {
generator,
chain: BenchChain,
mem_pool_state,
}
}
fn accounts_transfer(&self, accounts: u32, count: usize) {
let mut state = self.mem_pool_state.load_state_db();
let (block_producer_script, block_producer) = Account::build_script(0);
let block_info = BlockInfo::new_builder()
.block_producer(Bytes::from(block_producer.to_bytes()).pack())
.number(1.pack())
.timestamp(1.pack())
.build();
let block_producer_balance = state
.get_sudt_balance(CKB_SUDT_ACCOUNT_ID, &block_producer)
.unwrap();
let addrs: Vec<_> = (0..=accounts)
.map(Account::build_script)
.map(|(_s, addr)| addr)
.collect();
let address_offset = state
.get_account_id_by_script_hash(&block_producer_script.hash())
.unwrap()
.unwrap(); // start from block producer
let start_account_id = address_offset + 1;
let end_account_id = address_offset + accounts;
// Loop transfer from id to id + 1, until we reach target count
let mut from_id = start_account_id;
let mut transfer_count = count;
while transfer_count > 0 {
let to_address = {
let mut to_id = from_id + 1;
if to_id > end_account_id {
to_id = start_account_id;
}
addrs.get((to_id - address_offset) as usize).unwrap()
};
let args = SUDTArgs::new_builder()
.set(
SUDTTransfer::new_builder()
.to_address(Bytes::from(to_address.to_bytes()).pack())
.amount(U256::one().pack())
.fee(
Fee::new_builder()
.registry_id(ETH_REGISTRY_ACCOUNT_ID.pack())
.amount(1u128.pack())
.build(),
)
.build(),
)
.build();
let raw_tx = RawL2Transaction::new_builder()
.from_id(from_id.pack())
.to_id(1u32.pack())
.args(args.as_bytes().pack())
.build();
self.generator
.execute_transaction(
&self.chain,
&mut state,
&block_info,
&raw_tx,
Some(u64::MAX),
None,
)
.unwrap();
state.finalise().unwrap();
from_id += 1;
if from_id > end_account_id {
from_id = start_account_id;
}
transfer_count -= 1;
}
self.mem_pool_state.store_state_db(state);
let state = self.mem_pool_state.load_state_db();
let post_block_producer_balance = state
.get_sudt_balance(CKB_SUDT_ACCOUNT_ID, &block_producer)
.unwrap();
assert_eq!(
post_block_producer_balance,
block_producer_balance + count as u128
);
}
fn generate_accounts(
state: &mut (impl State + StateExt + CodeStore),
accounts: u32,
) -> Vec<Account> {
let build_account = |idx: u32| -> Account {
let (account_script, addr) = Account::build_script(idx);
let account_script_hash: H256 = account_script.hash();
let account_id = state.create_account(account_script_hash).unwrap();
state.insert_script(account_script_hash, account_script);
state
.mapping_registry_address_to_script_hash(addr.clone(), account_script_hash)
.unwrap();
state
.mint_sudt(CKB_SUDT_ACCOUNT_ID, &addr, CKB_BALANCE.into())
.unwrap();
Account { id: account_id }
};
(0..accounts).map(build_account).collect()
}
| bench_ckb_transfer | identifier_name |
smt.rs | // meta contract
const META_GENERATOR_PATH: &str =
"../../crates/builtin-binaries/builtin/gwos-v1.3.0-rc1/meta-contract-generator";
const META_VALIDATOR_SCRIPT_TYPE_HASH: [u8; 32] = [1u8; 32];
// sudt contract
const SUDT_GENERATOR_PATH: &str =
"../../crates/builtin-binaries/builtin/gwos-v1.3.0-rc1/sudt-generator";
const SUDT_VALIDATOR_SCRIPT_TYPE_HASH: [u8; 32] = [2u8; 32];
// always success lock
const ALWAYS_SUCCESS_LOCK_HASH: [u8; 32] = [3u8; 32];
// rollup type hash
const ROLLUP_TYPE_HASH: [u8; 32] = [4u8; 32];
const CKB_BALANCE: u128 = 100_000_000;
criterion_group! {
name = smt;
config = Criterion::default()
.with_profiler(PProfProfiler::new(100, Output::Flamegraph(None)));
targets = bench_ckb_transfer
}
pub fn bench_ckb_transfer(c: &mut Criterion) {
let config = StoreConfig {
path: "./smt_data/db".parse().unwrap(),
options_file: Some("./smt_data/db.toml".parse().unwrap()),
cache_size: Some(1073741824),
};
let store = Store::open(&config, COLUMNS).unwrap();
let ee = BenchExecutionEnvironment::new_with_accounts(store, 7000);
let mut group = c.benchmark_group("ckb_transfer");
for txs in (500..=5000).step_by(500) {
group.sample_size(10);
group.throughput(Throughput::Elements(txs));
group.bench_with_input(BenchmarkId::from_parameter(txs), &txs, |b, txs| {
b.iter(|| {
ee.accounts_transfer(7000, *txs as usize);
});
});
}
group.finish();
}
#[allow(dead_code)]
struct Account {
id: u32,
}
impl Account {
fn build_script(n: u32) -> (Script, RegistryAddress) {
let mut addr = [0u8; 20];
addr[..4].copy_from_slice(&n.to_le_bytes());
let mut args = vec![42u8; 32];
args.extend(&addr);
let script = Script::new_builder()
.code_hash(ALWAYS_SUCCESS_LOCK_HASH.pack())
.hash_type(ScriptHashType::Type.into())
.args(args.pack())
.build();
let addr = RegistryAddress::new(ETH_REGISTRY_ACCOUNT_ID, addr.to_vec());
(script, addr)
}
}
struct BenchChain;
impl ChainView for BenchChain {
fn get_block_hash_by_number(&self, _: u64) -> Result<Option<H256>> {
unreachable!("bench chain store")
}
}
struct BenchExecutionEnvironment {
generator: Generator,
chain: BenchChain,
mem_pool_state: MemPoolState,
}
impl BenchExecutionEnvironment {
fn new_with_accounts(store: Store, accounts: u32) -> Self {
let genesis_config = GenesisConfig {
meta_contract_validator_type_hash: META_VALIDATOR_SCRIPT_TYPE_HASH.into(),
rollup_type_hash: ROLLUP_TYPE_HASH.into(),
rollup_config: RollupConfig::new_builder()
.l2_sudt_validator_script_type_hash(SUDT_VALIDATOR_SCRIPT_TYPE_HASH.pack())
.allowed_eoa_type_hashes(
vec![AllowedTypeHash::new_builder()
.hash(ALWAYS_SUCCESS_LOCK_HASH.pack())
.type_(AllowedEoaType::Eth.into())
.build()]
.pack(),
)
.build()
.into(),
..Default::default()
};
let rollup_context = RollupContext {
rollup_config: genesis_config.rollup_config.clone().into(),
rollup_script_hash: ROLLUP_TYPE_HASH,
..Default::default()
};
let backend_manage = {
let configs = vec![
BackendConfig {
generator: Resource::file_system(META_GENERATOR_PATH.into()),
generator_checksum: file_checksum(&META_GENERATOR_PATH).unwrap().into(),
validator_script_type_hash: META_VALIDATOR_SCRIPT_TYPE_HASH.into(),
backend_type: gw_config::BackendType::Meta,
},
BackendConfig {
generator: Resource::file_system(SUDT_GENERATOR_PATH.into()),
generator_checksum: file_checksum(&SUDT_GENERATOR_PATH).unwrap().into(),
validator_script_type_hash: SUDT_VALIDATOR_SCRIPT_TYPE_HASH.into(),
backend_type: gw_config::BackendType::Sudt,
},
];
BackendManage::from_config(vec![BackendForkConfig {
sudt_proxy: Default::default(),
fork_height: 0,
backends: configs,
}])
.expect("bench backend")
};
let account_lock_manage = {
let mut manage = AccountLockManage::default();
manage.register_lock_algorithm(ALWAYS_SUCCESS_LOCK_HASH, Arc::new(AlwaysSuccess));
manage
};
let generator = Generator::new(
backend_manage,
account_lock_manage,
rollup_context,
Default::default(),
);
Self::init_genesis(&store, &genesis_config, accounts);
let mem_pool_state = MemPoolState::new(
MemStateDB::from_store(store.get_snapshot()).expect("mem state db"),
true,
);
BenchExecutionEnvironment {
generator,
chain: BenchChain,
mem_pool_state,
}
}
fn accounts_transfer(&self, accounts: u32, count: usize) {
let mut state = self.mem_pool_state.load_state_db();
let (block_producer_script, block_producer) = Account::build_script(0);
let block_info = BlockInfo::new_builder()
.block_producer(Bytes::from(block_producer.to_bytes()).pack())
.number(1.pack())
.timestamp(1.pack())
.build();
let block_producer_balance = state
.get_sudt_balance(CKB_SUDT_ACCOUNT_ID, &block_producer)
.unwrap();
let addrs: Vec<_> = (0..=accounts)
.map(Account::build_script)
.map(|(_s, addr)| addr)
.collect();
let address_offset = state
.get_account_id_by_script_hash(&block_producer_script.hash())
.unwrap()
.unwrap(); // start from block producer
let start_account_id = address_offset + 1;
let end_account_id = address_offset + accounts;
// Loop transfer from id to id + 1, until we reach target count
let mut from_id = start_account_id;
let mut transfer_count = count;
while transfer_count > 0 {
let to_address = {
let mut to_id = from_id + 1;
if to_id > end_account_id {
to_id = start_account_id;
}
addrs.get((to_id - address_offset) as usize).unwrap()
};
let args = SUDTArgs::new_builder()
.set(
SUDTTransfer::new_builder()
.to_address(Bytes::from(to_address.to_bytes()).pack())
.amount(U256::one().pack())
.fee(
Fee::new_builder()
.registry_id(ETH_REGISTRY_ACCOUNT_ID.pack())
.amount(1u128.pack())
.build(),
)
.build(),
)
.build();
let raw_tx = RawL2Transaction::new_builder()
.from_id(from_id.pack())
.to_id(1u32.pack())
.args(args.as_bytes().pack())
.build();
self.generator
.execute_transaction(
&self.chain,
&mut state,
&block_info,
&raw_tx,
Some(u64::MAX),
None,
)
.unwrap();
state.finalise().unwrap();
from_id += 1;
if from_id > end_account_id {
from_id = start_account_id;
}
transfer_count -= 1;
}
self.mem_pool_state.store_state_db(state);
let state = self.mem_pool_state.load_state_db();
let post_block_producer_balance = state
.get_sudt_balance(CKB_SUDT_ACCOUNT_ID, &block_producer)
.unwrap();
assert_eq!(
post_block_producer_balance,
block_producer_balance + count as u128
);
}
fn generate_accounts(
state: &mut (impl State + StateExt + CodeStore),
accounts: u32,
) -> Vec<Account> {
let build_account = |idx: u32| -> Account {
let (account_script, addr) = Account::build_script(idx);
let account_script_hash: H256 = account_script.hash();
let account_id = state.create_account(account_script_hash).unwrap();
state.insert_script(account_script_hash, account_script);
state
.mapping_registry_address_to_script_hash(addr.clone(), account_script_hash)
.unwrap();
state
.mint_sudt(CKB_SUDT_ACCOUNT_ID, &addr, CKB_BALANCE.into())
| random_line_split |
||
smt.rs | 3.0-rc1/meta-contract-generator";
const META_VALIDATOR_SCRIPT_TYPE_HASH: [u8; 32] = [1u8; 32];
// sudt contract
const SUDT_GENERATOR_PATH: &str =
"../../crates/builtin-binaries/builtin/gwos-v1.3.0-rc1/sudt-generator";
const SUDT_VALIDATOR_SCRIPT_TYPE_HASH: [u8; 32] = [2u8; 32];
// always success lock
const ALWAYS_SUCCESS_LOCK_HASH: [u8; 32] = [3u8; 32];
// rollup type hash
const ROLLUP_TYPE_HASH: [u8; 32] = [4u8; 32];
const CKB_BALANCE: u128 = 100_000_000;
criterion_group! {
name = smt;
config = Criterion::default()
.with_profiler(PProfProfiler::new(100, Output::Flamegraph(None)));
targets = bench_ckb_transfer
}
pub fn bench_ckb_transfer(c: &mut Criterion) {
let config = StoreConfig {
path: "./smt_data/db".parse().unwrap(),
options_file: Some("./smt_data/db.toml".parse().unwrap()),
cache_size: Some(1073741824),
};
let store = Store::open(&config, COLUMNS).unwrap();
let ee = BenchExecutionEnvironment::new_with_accounts(store, 7000);
let mut group = c.benchmark_group("ckb_transfer");
for txs in (500..=5000).step_by(500) {
group.sample_size(10);
group.throughput(Throughput::Elements(txs));
group.bench_with_input(BenchmarkId::from_parameter(txs), &txs, |b, txs| {
b.iter(|| {
ee.accounts_transfer(7000, *txs as usize);
});
});
}
group.finish();
}
#[allow(dead_code)]
struct Account {
id: u32,
}
impl Account {
fn build_script(n: u32) -> (Script, RegistryAddress) {
let mut addr = [0u8; 20];
addr[..4].copy_from_slice(&n.to_le_bytes());
let mut args = vec![42u8; 32];
args.extend(&addr);
let script = Script::new_builder()
.code_hash(ALWAYS_SUCCESS_LOCK_HASH.pack())
.hash_type(ScriptHashType::Type.into())
.args(args.pack())
.build();
let addr = RegistryAddress::new(ETH_REGISTRY_ACCOUNT_ID, addr.to_vec());
(script, addr)
}
}
struct BenchChain;
impl ChainView for BenchChain {
fn get_block_hash_by_number(&self, _: u64) -> Result<Option<H256>> |
}
struct BenchExecutionEnvironment {
generator: Generator,
chain: BenchChain,
mem_pool_state: MemPoolState,
}
impl BenchExecutionEnvironment {
fn new_with_accounts(store: Store, accounts: u32) -> Self {
let genesis_config = GenesisConfig {
meta_contract_validator_type_hash: META_VALIDATOR_SCRIPT_TYPE_HASH.into(),
rollup_type_hash: ROLLUP_TYPE_HASH.into(),
rollup_config: RollupConfig::new_builder()
.l2_sudt_validator_script_type_hash(SUDT_VALIDATOR_SCRIPT_TYPE_HASH.pack())
.allowed_eoa_type_hashes(
vec![AllowedTypeHash::new_builder()
.hash(ALWAYS_SUCCESS_LOCK_HASH.pack())
.type_(AllowedEoaType::Eth.into())
.build()]
.pack(),
)
.build()
.into(),
..Default::default()
};
let rollup_context = RollupContext {
rollup_config: genesis_config.rollup_config.clone().into(),
rollup_script_hash: ROLLUP_TYPE_HASH,
..Default::default()
};
let backend_manage = {
let configs = vec![
BackendConfig {
generator: Resource::file_system(META_GENERATOR_PATH.into()),
generator_checksum: file_checksum(&META_GENERATOR_PATH).unwrap().into(),
validator_script_type_hash: META_VALIDATOR_SCRIPT_TYPE_HASH.into(),
backend_type: gw_config::BackendType::Meta,
},
BackendConfig {
generator: Resource::file_system(SUDT_GENERATOR_PATH.into()),
generator_checksum: file_checksum(&SUDT_GENERATOR_PATH).unwrap().into(),
validator_script_type_hash: SUDT_VALIDATOR_SCRIPT_TYPE_HASH.into(),
backend_type: gw_config::BackendType::Sudt,
},
];
BackendManage::from_config(vec![BackendForkConfig {
sudt_proxy: Default::default(),
fork_height: 0,
backends: configs,
}])
.expect("bench backend")
};
let account_lock_manage = {
let mut manage = AccountLockManage::default();
manage.register_lock_algorithm(ALWAYS_SUCCESS_LOCK_HASH, Arc::new(AlwaysSuccess));
manage
};
let generator = Generator::new(
backend_manage,
account_lock_manage,
rollup_context,
Default::default(),
);
Self::init_genesis(&store, &genesis_config, accounts);
let mem_pool_state = MemPoolState::new(
MemStateDB::from_store(store.get_snapshot()).expect("mem state db"),
true,
);
BenchExecutionEnvironment {
generator,
chain: BenchChain,
mem_pool_state,
}
}
fn accounts_transfer(&self, accounts: u32, count: usize) {
let mut state = self.mem_pool_state.load_state_db();
let (block_producer_script, block_producer) = Account::build_script(0);
let block_info = BlockInfo::new_builder()
.block_producer(Bytes::from(block_producer.to_bytes()).pack())
.number(1.pack())
.timestamp(1.pack())
.build();
let block_producer_balance = state
.get_sudt_balance(CKB_SUDT_ACCOUNT_ID, &block_producer)
.unwrap();
let addrs: Vec<_> = (0..=accounts)
.map(Account::build_script)
.map(|(_s, addr)| addr)
.collect();
let address_offset = state
.get_account_id_by_script_hash(&block_producer_script.hash())
.unwrap()
.unwrap(); // start from block producer
let start_account_id = address_offset + 1;
let end_account_id = address_offset + accounts;
// Loop transfer from id to id + 1, until we reach target count
let mut from_id = start_account_id;
let mut transfer_count = count;
while transfer_count > 0 {
let to_address = {
let mut to_id = from_id + 1;
if to_id > end_account_id {
to_id = start_account_id;
}
addrs.get((to_id - address_offset) as usize).unwrap()
};
let args = SUDTArgs::new_builder()
.set(
SUDTTransfer::new_builder()
.to_address(Bytes::from(to_address.to_bytes()).pack())
.amount(U256::one().pack())
.fee(
Fee::new_builder()
.registry_id(ETH_REGISTRY_ACCOUNT_ID.pack())
.amount(1u128.pack())
.build(),
)
.build(),
)
.build();
let raw_tx = RawL2Transaction::new_builder()
.from_id(from_id.pack())
.to_id(1u32.pack())
.args(args.as_bytes().pack())
.build();
self.generator
.execute_transaction(
&self.chain,
&mut state,
&block_info,
&raw_tx,
Some(u64::MAX),
None,
)
.unwrap();
state.finalise().unwrap();
from_id += 1;
if from_id > end_account_id {
from_id = start_account_id;
}
transfer_count -= 1;
}
self.mem_pool_state.store_state_db(state);
let state = self.mem_pool_state.load_state_db();
let post_block_producer_balance = state
.get_sudt_balance(CKB_SUDT_ACCOUNT_ID, &block_producer)
.unwrap();
assert_eq!(
post_block_producer_balance,
block_producer_balance + count as u128
);
}
fn generate_accounts(
state: &mut (impl State + StateExt + CodeStore),
accounts: u32,
) -> Vec<Account> {
let build_account = |idx: u32| -> Account {
let (account_script, addr) = Account::build_script(idx);
let account_script_hash: H256 = account_script.hash();
let account_id = state.create_account(account_script_hash).unwrap();
state.insert_script(account_script_hash, account_script);
state
.mapping_registry_address_to_script_hash(addr.clone(), account_script_hash)
.unwrap();
state
.mint_sudt(CKB_SUDT_ACCOUNT_ID, &addr, CKB_BALANCE.into())
.unwrap();
Account { id: account_id }
};
(0..accounts).map(build_account).collect()
| {
unreachable!("bench chain store")
} | identifier_body |
smt.rs | rc1/sudt-generator";
const SUDT_VALIDATOR_SCRIPT_TYPE_HASH: [u8; 32] = [2u8; 32];
// always success lock
const ALWAYS_SUCCESS_LOCK_HASH: [u8; 32] = [3u8; 32];
// rollup type hash
const ROLLUP_TYPE_HASH: [u8; 32] = [4u8; 32];
const CKB_BALANCE: u128 = 100_000_000;
criterion_group! {
name = smt;
config = Criterion::default()
.with_profiler(PProfProfiler::new(100, Output::Flamegraph(None)));
targets = bench_ckb_transfer
}
pub fn bench_ckb_transfer(c: &mut Criterion) {
let config = StoreConfig {
path: "./smt_data/db".parse().unwrap(),
options_file: Some("./smt_data/db.toml".parse().unwrap()),
cache_size: Some(1073741824),
};
let store = Store::open(&config, COLUMNS).unwrap();
let ee = BenchExecutionEnvironment::new_with_accounts(store, 7000);
let mut group = c.benchmark_group("ckb_transfer");
for txs in (500..=5000).step_by(500) {
group.sample_size(10);
group.throughput(Throughput::Elements(txs));
group.bench_with_input(BenchmarkId::from_parameter(txs), &txs, |b, txs| {
b.iter(|| {
ee.accounts_transfer(7000, *txs as usize);
});
});
}
group.finish();
}
#[allow(dead_code)]
struct Account {
id: u32,
}
impl Account {
fn build_script(n: u32) -> (Script, RegistryAddress) {
let mut addr = [0u8; 20];
addr[..4].copy_from_slice(&n.to_le_bytes());
let mut args = vec![42u8; 32];
args.extend(&addr);
let script = Script::new_builder()
.code_hash(ALWAYS_SUCCESS_LOCK_HASH.pack())
.hash_type(ScriptHashType::Type.into())
.args(args.pack())
.build();
let addr = RegistryAddress::new(ETH_REGISTRY_ACCOUNT_ID, addr.to_vec());
(script, addr)
}
}
struct BenchChain;
impl ChainView for BenchChain {
fn get_block_hash_by_number(&self, _: u64) -> Result<Option<H256>> {
unreachable!("bench chain store")
}
}
struct BenchExecutionEnvironment {
generator: Generator,
chain: BenchChain,
mem_pool_state: MemPoolState,
}
impl BenchExecutionEnvironment {
fn new_with_accounts(store: Store, accounts: u32) -> Self {
let genesis_config = GenesisConfig {
meta_contract_validator_type_hash: META_VALIDATOR_SCRIPT_TYPE_HASH.into(),
rollup_type_hash: ROLLUP_TYPE_HASH.into(),
rollup_config: RollupConfig::new_builder()
.l2_sudt_validator_script_type_hash(SUDT_VALIDATOR_SCRIPT_TYPE_HASH.pack())
.allowed_eoa_type_hashes(
vec![AllowedTypeHash::new_builder()
.hash(ALWAYS_SUCCESS_LOCK_HASH.pack())
.type_(AllowedEoaType::Eth.into())
.build()]
.pack(),
)
.build()
.into(),
..Default::default()
};
let rollup_context = RollupContext {
rollup_config: genesis_config.rollup_config.clone().into(),
rollup_script_hash: ROLLUP_TYPE_HASH,
..Default::default()
};
let backend_manage = {
let configs = vec![
BackendConfig {
generator: Resource::file_system(META_GENERATOR_PATH.into()),
generator_checksum: file_checksum(&META_GENERATOR_PATH).unwrap().into(),
validator_script_type_hash: META_VALIDATOR_SCRIPT_TYPE_HASH.into(),
backend_type: gw_config::BackendType::Meta,
},
BackendConfig {
generator: Resource::file_system(SUDT_GENERATOR_PATH.into()),
generator_checksum: file_checksum(&SUDT_GENERATOR_PATH).unwrap().into(),
validator_script_type_hash: SUDT_VALIDATOR_SCRIPT_TYPE_HASH.into(),
backend_type: gw_config::BackendType::Sudt,
},
];
BackendManage::from_config(vec![BackendForkConfig {
sudt_proxy: Default::default(),
fork_height: 0,
backends: configs,
}])
.expect("bench backend")
};
let account_lock_manage = {
let mut manage = AccountLockManage::default();
manage.register_lock_algorithm(ALWAYS_SUCCESS_LOCK_HASH, Arc::new(AlwaysSuccess));
manage
};
let generator = Generator::new(
backend_manage,
account_lock_manage,
rollup_context,
Default::default(),
);
Self::init_genesis(&store, &genesis_config, accounts);
let mem_pool_state = MemPoolState::new(
MemStateDB::from_store(store.get_snapshot()).expect("mem state db"),
true,
);
BenchExecutionEnvironment {
generator,
chain: BenchChain,
mem_pool_state,
}
}
fn accounts_transfer(&self, accounts: u32, count: usize) {
let mut state = self.mem_pool_state.load_state_db();
let (block_producer_script, block_producer) = Account::build_script(0);
let block_info = BlockInfo::new_builder()
.block_producer(Bytes::from(block_producer.to_bytes()).pack())
.number(1.pack())
.timestamp(1.pack())
.build();
let block_producer_balance = state
.get_sudt_balance(CKB_SUDT_ACCOUNT_ID, &block_producer)
.unwrap();
let addrs: Vec<_> = (0..=accounts)
.map(Account::build_script)
.map(|(_s, addr)| addr)
.collect();
let address_offset = state
.get_account_id_by_script_hash(&block_producer_script.hash())
.unwrap()
.unwrap(); // start from block producer
let start_account_id = address_offset + 1;
let end_account_id = address_offset + accounts;
// Loop transfer from id to id + 1, until we reach target count
let mut from_id = start_account_id;
let mut transfer_count = count;
while transfer_count > 0 {
let to_address = {
let mut to_id = from_id + 1;
if to_id > end_account_id {
to_id = start_account_id;
}
addrs.get((to_id - address_offset) as usize).unwrap()
};
let args = SUDTArgs::new_builder()
.set(
SUDTTransfer::new_builder()
.to_address(Bytes::from(to_address.to_bytes()).pack())
.amount(U256::one().pack())
.fee(
Fee::new_builder()
.registry_id(ETH_REGISTRY_ACCOUNT_ID.pack())
.amount(1u128.pack())
.build(),
)
.build(),
)
.build();
let raw_tx = RawL2Transaction::new_builder()
.from_id(from_id.pack())
.to_id(1u32.pack())
.args(args.as_bytes().pack())
.build();
self.generator
.execute_transaction(
&self.chain,
&mut state,
&block_info,
&raw_tx,
Some(u64::MAX),
None,
)
.unwrap();
state.finalise().unwrap();
from_id += 1;
if from_id > end_account_id {
from_id = start_account_id;
}
transfer_count -= 1;
}
self.mem_pool_state.store_state_db(state);
let state = self.mem_pool_state.load_state_db();
let post_block_producer_balance = state
.get_sudt_balance(CKB_SUDT_ACCOUNT_ID, &block_producer)
.unwrap();
assert_eq!(
post_block_producer_balance,
block_producer_balance + count as u128
);
}
fn generate_accounts(
state: &mut (impl State + StateExt + CodeStore),
accounts: u32,
) -> Vec<Account> {
let build_account = |idx: u32| -> Account {
let (account_script, addr) = Account::build_script(idx);
let account_script_hash: H256 = account_script.hash();
let account_id = state.create_account(account_script_hash).unwrap();
state.insert_script(account_script_hash, account_script);
state
.mapping_registry_address_to_script_hash(addr.clone(), account_script_hash)
.unwrap();
state
.mint_sudt(CKB_SUDT_ACCOUNT_ID, &addr, CKB_BALANCE.into())
.unwrap();
Account { id: account_id }
};
(0..accounts).map(build_account).collect()
}
fn init_genesis(store: &Store, config: &GenesisConfig, accounts: u32) {
if store.has_genesis().unwrap() {
let chain_id = store.get_chain_id().unwrap();
if chain_id == ROLLUP_TYPE_HASH {
return;
} else | {
panic!("store genesis already initialized");
} | conditional_block |
|
run.py | for s in ltm:
m.absorb(s, timesteps=asteps)
set_global_param('punishing', pun)
set_global_param('painter_clarity', pcl)
set_global_param('cell_clarity', ccl)
set_log_level(llr)
lo(1, 'LTS\n' + m.lts.state_str())
#lo(1, 'LTS\n' + m.lts.state_str_with_authors())
if rsteps:
set_global_param('allow_ab_initio_painters', abr)
m.regen_from(seed, nsteps=rsteps)
set_global_param('allow_ab_initio_painters', True)
print(m.canvas)
print()
print(m.ws.state_str_with_authors())
def again(**kwargs):
    '''Reruns the previous experiment, optionally overriding arguments.'''
global last_args
run(fresh=False, **(last_args | kwargs))
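# Usage sketch (assumes a previous call to run() has stored its arguments
# in last_args):
#   again(rsteps=100)   # rerun the same experiment, regenerating longer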
def run1(**kwargs):
'''1st example in dissertation.'''
set_latex_mode()
run(ab=[ab1, ab3], **kwargs)
def run_bad(**kwargs) -> None:
    '''Absorption only (rsteps=0), with verbose logging during absorption.'''
    d = dict(
auto_annotate=[Start, End],
asteps=100,
rsteps=0,
lla=4
)
run(**(d | kwargs))
def run_test(**kwargs) -> None:
    '''Absorb-and-regenerate test run with verbose logging in both phases.'''
    d = dict(
auto_annotate=[Start, End],
asteps=100,
rsteps=60,
lla=6,
llr=6,
seed='m '
)
run(**(d | kwargs))
def run_pons(**kwargs) -> None:
'''Runs the pons asinorum.'''
lo(0, "pons asinorum")
d = dict(
ltm=[],
asteps=0,
seed='abcabdijk ',
rsteps=200, #500,
#llr=4,
auto_annotate=[]
)
run(**(d | kwargs))
def h(*ids):
'''Plot history of painters with given ids.'''
global m
for i in ids:
#plt.plot(range(1, m.t + 1), m.history_of(i))
#plt.plot(*zip(m.history_of(i)))
hs = list(zip(*m.history_of(i)))
pts(hs)
print(len(hs))
plt.plot(*hs)
plt.show()
def as_lts(s: str) -> List[str]:
if not s:
return []
elif ',' not in s:
return [s]
else:
return s.split(',')
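# For example, as_lts('ajaqb,abcde') returns ['ajaqb', 'abcde'], and
# as_lts('') returns [].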
def parse_and_run() -> None:
global rngseed
parser = argparse.ArgumentParser()
parser.add_argument(
"-r",
"--rngseed",
help="random-number seed",
type=int
)
parser.add_argument("--ltm", help="the long-term soup", default='ajaqb')
parser.add_argument(
"seed",
help="the seed string",
default='a ',
nargs='?'
)
parser.add_argument(
"--asteps",
help="number of timesteps to absorb each LTS string",
type=int,
default=40
)
parser.add_argument(
"--rsteps",
help="number of timesteps to regenerate",
type=int,
default=60
)
parser.add_argument(
"--lla",
help="logging level during absorption",
type=int,
default=0
)
parser.add_argument(
"--llr",
help="logging level during regeneration",
type=int,
default=2
)
parser.add_argument(
"--au",
help="which set of cell attributes to auto-annotate",
type=int,
choices=[0, 1],
default=global_params.auto_annotations
)
args = parser.parse_args()
global_params.auto_annotations = args.au
run(
seed=args.seed,
ltm=as_lts(args.ltm),
asteps=args.asteps,
rsteps=args.rsteps,
lla=args.lla,
llr=args.llr,
rng=args.rngseed
)
lo(0, f'rngseed={rngseed}{newline}')
def set_rngseed(r: Optional[int]=None) -> None:
global rngseed
rngseed = reseed(r)
lo(0, f'rngseed={rngseed}{newline}')
def runabs1():
# abs painters only
run(seed='a ', ltm=['abcde'], ab=[ab1])
def runabs2():
# abs painters with a different seed
run(seed='m ', ltm=['abcde'], ab=[ab1])
def runabs_ajaqb(**kwargs):
|
def runab13_ajaqb(seed='a '):
run(seed, ltm=['ajaqb'], ab=[ab1, ab3])
def runabs3():
# abs painters with more LTM
run(seed='a ', ltm=['abcde', 'ggggg'], ab=[ab1])
# problem: many sames => many nonadjacent 'same' painters
# therefore 'aaaaa' wins
def runrel():
kw = dict(
seed='a ',
ltm=['abcde'],
ab=[ab1, ab2],
asteps=100,
rsteps=40,
exc=True
)
run(**kw)
def runab123(**kwargs):
kw = dict(ltm=['ajaqb'], ab=[ab1, ab2, ab3], asteps=100, exc=True) | kwargs
run(**kw)
# NEXT Try absolute & digraph painters with a big LTM to see if that creates
# a need for clarity.
def run_ad(**kwargs):
# make relative (digraph) painters: gets 'aqb ' a lot
kw = dict(
#ltm=['ajaqb', 'pqrst', 'abcde', 'aabbc'],
#ltm=['ajaqb', 'abcde', 'aabbc'],
#ltm=['ajaqb', 'abcde'],
ltm=['ajaqb'],
ab=[ab4],
asteps=30,
rsteps=25,
lla=0,
llr=1,
pun=False,
exc=True
) | kwargs
run(**kw)
def run_ad2(**kwargs):
# blends some simple strings
kw = dict(ltm=['ajaqb', 'abcde', 'aabbc']) | kwargs
run(**kw)
def r(d: dict, **kwargs):
'''Convenience function for running experiments in the REPL. Optionally
override a dict of arguments to run().'''
run(**(d | kwargs))
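# e.g. r(hoplike, rsteps=50) runs the 'hoplike' experiment defined below
# with rsteps overridden.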
# A run just like the Hopfield net: many strings in memory, absolute painters
# only.
hoplike = dict(
ltm=['ajaqb', 'pqrst', 'abcde', 'aabbc', 'ponop'],
ab=[ab1],
asteps=40,
rsteps=100,
lla=0,
llr=2,
pun=False,
pcl=False,
exc=False
)
hoplike_long = hoplike | dict(
ltm=['abrsecpbqf', 'efghijklmn', 'tfserfdqgc', 'abcdepomnl'],
seed='abr e bqf'
)
hoplike_long_easy = hoplike | dict(
ltm=['aaaaabca', 'gggvvwgg', 'pqrspqrs'],
seed='aaaa a',
ab=[ab1a],
rsteps=30,
ccl=False
)
example1 = hoplike_long_easy | dict(
#seed='aa a',
seed=' aabb ',
rsteps=20, #50,
abr=False,
ccl=False,
rng=444834040015719226, #8066335492150159463,
llr=2
)
example2 = example1 | dict(
ccl=True,
rsteps=40,
)
rel0 = dict( # reconstructs with abs painters only, because seed is aligned
# right
ltm=['bbbbghij'],
seed='b g ',
ab=[ab1a],
asteps=150,
rsteps=50,
abr=False,
pcl=False,
llr=2
)
rel1 = rel0 | dict( # shows inability of abs painters to adjust to a shift (??)
seed='b gh ',
rsteps=50,
rng=620217766481971979
)
# PROBLEM This actually generates the desired analogy! bbbghijk
# We need an illustration of how absolute painters can't follow a shift.
rel2 = rel1 | dict( # relative painters: a bit nicer
ab=[ab4]
)
rel0a = rel0 | dict(
ltm=['bbbbghib'],
seed='b g '
)
rel1a = rel0a | dict(
seed='b g ',
)
rel2a | run(seed='a ', ltm=['ajaqb'], ab=[ab1])
# no reln with j or q | identifier_body |
run.py |
set_global_param('punishing', pun)
set_global_param('painter_clarity', pcl)
set_global_param('cell_clarity', ccl)
set_log_level(llr)
lo(1, 'LTS\n' + m.lts.state_str())
#lo(1, 'LTS\n' + m.lts.state_str_with_authors())
if rsteps:
set_global_param('allow_ab_initio_painters', abr)
m.regen_from(seed, nsteps=rsteps)
set_global_param('allow_ab_initio_painters', True)
print(m.canvas)
print()
print(m.ws.state_str_with_authors())
def again(**kwargs):
global last_args
run(fresh=False, **(last_args | kwargs))
def run1(**kwargs):
'''1st example in dissertation.'''
set_latex_mode()
run(ab=[ab1, ab3], **kwargs)
def run_bad(**kwargs) -> None:
d = dict(
auto_annotate=[Start, End],
asteps=100,
rsteps=0,
lla=4
)
run(**(d | kwargs))
def run_test(**kwargs) -> None:
d = dict(
auto_annotate=[Start, End],
asteps=100,
rsteps=60,
lla=6,
llr=6,
seed='m '
)
run(**(d | kwargs))
def run_pons(**kwargs) -> None:
'''Runs the pons asinorum.'''
lo(0, "pons asinorum")
d = dict(
ltm=[],
asteps=0,
seed='abcabdijk ',
rsteps=200, #500,
#llr=4,
auto_annotate=[]
)
run(**(d | kwargs))
def h(*ids):
'''Plot history of painters with given ids.'''
global m
for i in ids:
#plt.plot(range(1, m.t + 1), m.history_of(i))
#plt.plot(*zip(m.history_of(i)))
hs = list(zip(*m.history_of(i)))
pts(hs)
print(len(hs))
plt.plot(*hs)
plt.show()
def as_lts(s: str) -> List[str]:
if not s:
return []
elif ',' not in s:
return [s]
else:
return s.split(',')
def parse_and_run() -> None:
global rngseed
parser = argparse.ArgumentParser()
parser.add_argument(
"-r",
"--rngseed",
help="random-number seed",
type=int
)
parser.add_argument("--ltm", help="the long-term soup", default='ajaqb')
parser.add_argument(
"seed",
help="the seed string",
default='a ',
nargs='?'
)
parser.add_argument(
"--asteps",
help="number of timesteps to absorb each LTS string",
type=int,
default=40
)
parser.add_argument(
"--rsteps",
help="number of timesteps to regenerate",
type=int,
default=60
)
parser.add_argument(
"--lla",
help="logging level during absorption",
type=int,
default=0
)
parser.add_argument(
"--llr",
help="logging level during regeneration",
type=int,
default=2
)
parser.add_argument(
"--au",
help="which set of cell attributes to auto-annotate",
type=int,
choices=[0, 1],
default=global_params.auto_annotations
)
args = parser.parse_args()
global_params.auto_annotations = args.au
run(
seed=args.seed,
ltm=as_lts(args.ltm),
asteps=args.asteps,
rsteps=args.rsteps,
lla=args.lla,
llr=args.llr,
rng=args.rngseed
)
lo(0, f'rngseed={rngseed}{newline}')
def set_rngseed(r: Optional[int]=None) -> None:
global rngseed
rngseed = reseed(r)
lo(0, f'rngseed={rngseed}{newline}')
def runabs1():
# abs painters only
run(seed='a ', ltm=['abcde'], ab=[ab1])
def runabs2():
# abs painters with a different seed
run(seed='m ', ltm=['abcde'], ab=[ab1])
def runabs_ajaqb(**kwargs):
run(seed='a ', ltm=['ajaqb'], ab=[ab1])
# no reln with j or q
def runab13_ajaqb(seed='a '):
run(seed, ltm=['ajaqb'], ab=[ab1, ab3])
def runabs3():
# abs painters with more LTM
run(seed='a ', ltm=['abcde', 'ggggg'], ab=[ab1])
# problem: many sames => many nonadjacent 'same' painters
# therefore 'aaaaa' wins
def runrel():
kw = dict(
seed='a ',
ltm=['abcde'],
ab=[ab1, ab2],
asteps=100,
rsteps=40,
exc=True
)
run(**kw)
def runab123(**kwargs):
kw = dict(ltm=['ajaqb'], ab=[ab1, ab2, ab3], asteps=100, exc=True) | kwargs
run(**kw)
# NEXT Try absolute & digraph painters with a big LTM to see if that creates
# a need for clarity.
def run_ad(**kwargs):
# make relative (digraph) painters: gets 'aqb ' a lot
kw = dict(
#ltm=['ajaqb', 'pqrst', 'abcde', 'aabbc'],
#ltm=['ajaqb', 'abcde', 'aabbc'],
#ltm=['ajaqb', 'abcde'],
ltm=['ajaqb'],
ab=[ab4],
asteps=30,
rsteps=25,
lla=0,
llr=1,
pun=False,
exc=True
) | kwargs
run(**kw)
def run_ad2(**kwargs):
# blends some simple strings
kw = dict(ltm=['ajaqb', 'abcde', 'aabbc']) | kwargs
run(**kw)
def r(d: dict, **kwargs):
'''Convenience function for running experiments in the REPL. Optionally
override a dict of arguments to run().'''
run(**(d | kwargs))
# A run just like the Hopfield net: many strings in memory, absolute painters
# only.
hoplike = dict(
ltm=['ajaqb', 'pqrst', 'abcde', 'aabbc', 'ponop'],
ab=[ab1],
asteps=40,
rsteps=100,
lla=0,
llr=2,
pun=False,
pcl=False,
exc=False
)
hoplike_long = hoplike | dict(
ltm=['abrsecpbqf', 'efghijklmn', 'tfserfdqgc', 'abcdepomnl'],
seed='abr e bqf'
)
hoplike_long_easy = hoplike | dict(
ltm=['aaaaabca', 'gggvvwgg', 'pqrspqrs'],
seed='aaaa a',
ab=[ab1a],
rsteps=30,
ccl=False
)
example1 = hoplike_long_easy | dict(
#seed='aa a',
seed=' aabb ',
rsteps=20, #50,
abr=False,
ccl=False,
rng=444834040015719226, #8066335492150159463,
llr=2
)
example2 = example1 | dict(
ccl=True,
rsteps=40,
)
rel0 = dict( # reconstructs with abs painters only, because seed is aligned
# right
ltm=['bbbbghij'],
seed='b g ',
ab=[ab1a],
asteps=150,
rsteps=50,
abr=False,
pcl=False,
llr=2
)
rel1 = rel0 | dict( # shows inability of abs painters to adjust to a shift (??)
seed='b gh ',
rsteps=50,
rng=620217766481971979
)
# PROBLEM This actually generates the desired analogy! bbbghijk
# We need an illustration of how absolute painters can't follow a shift.
rel2 = rel1 | dict( # relative painters: a bit nicer
ab=[ab4]
)
rel0a = rel0 | dict(
ltm=['bbbbghib'],
seed='b g '
)
rel1a = rel0a | dict(
seed='b g ',
)
rel2 | for s in ltm:
m.absorb(s, timesteps=asteps) | conditional_block |
|
run.py | for s in ltm:
m.absorb(s, timesteps=asteps)
set_global_param('punishing', pun)
set_global_param('painter_clarity', pcl)
set_global_param('cell_clarity', ccl)
set_log_level(llr)
lo(1, 'LTS\n' + m.lts.state_str())
#lo(1, 'LTS\n' + m.lts.state_str_with_authors())
if rsteps:
set_global_param('allow_ab_initio_painters', abr)
m.regen_from(seed, nsteps=rsteps)
set_global_param('allow_ab_initio_painters', True)
print(m.canvas)
print()
print(m.ws.state_str_with_authors())
def again(**kwargs):
global last_args
run(fresh=False, **(last_args | kwargs))
def run1(**kwargs):
'''1st example in dissertation.'''
set_latex_mode()
run(ab=[ab1, ab3], **kwargs)
def run_bad(**kwargs) -> None:
d = dict(
auto_annotate=[Start, End],
asteps=100,
rsteps=0,
lla=4
)
run(**(d | kwargs))
def run_test(**kwargs) -> None:
d = dict(
auto_annotate=[Start, End],
asteps=100,
rsteps=60,
lla=6,
llr=6,
seed='m '
)
run(**(d | kwargs))
def run_pons(**kwargs) -> None:
'''Runs the pons asinorum.'''
lo(0, "pons asinorum")
d = dict(
ltm=[],
asteps=0,
seed='abcabdijk ',
rsteps=200, #500,
#llr=4,
auto_annotate=[]
)
run(**(d | kwargs))
def h(*ids):
'''Plot history of painters with given ids.'''
global m
for i in ids:
#plt.plot(range(1, m.t + 1), m.history_of(i))
#plt.plot(*zip(m.history_of(i)))
hs = list(zip(*m.history_of(i)))
pts(hs)
print(len(hs))
plt.plot(*hs)
plt.show()
def as_lts(s: str) -> List[str]:
if not s:
return []
elif ',' not in s:
return [s]
else:
return s.split(',')
def parse_and_run() -> None:
global rngseed
parser = argparse.ArgumentParser()
parser.add_argument(
"-r",
"--rngseed",
help="random-number seed",
type=int
)
parser.add_argument("--ltm", help="the long-term soup", default='ajaqb')
parser.add_argument(
"seed",
help="the seed string",
default='a ',
nargs='?'
)
parser.add_argument(
"--asteps",
help="number of timesteps to absorb each LTS string",
type=int,
default=40
)
parser.add_argument(
"--rsteps",
help="number of timesteps to regenerate",
type=int,
default=60
)
parser.add_argument(
"--lla",
help="logging level during absorption",
type=int,
default=0
)
parser.add_argument(
"--llr",
help="logging level during regeneration",
type=int,
default=2
)
parser.add_argument(
"--au",
help="which set of cell attributes to auto-annotate",
type=int,
choices=[0, 1],
default=global_params.auto_annotations
)
args = parser.parse_args()
global_params.auto_annotations = args.au
run(
seed=args.seed,
ltm=as_lts(args.ltm),
asteps=args.asteps,
rsteps=args.rsteps,
lla=args.lla,
llr=args.llr,
rng=args.rngseed
)
lo(0, f'rngseed={rngseed}{newline}')
def set_rngseed(r: Optional[int]=None) -> None:
global rngseed
rngseed = reseed(r)
lo(0, f'rngseed={rngseed}{newline}')
def runabs1():
# abs painters only
run(seed='a ', ltm=['abcde'], ab=[ab1])
def | ():
# abs painters with a different seed
run(seed='m ', ltm=['abcde'], ab=[ab1])
def runabs_ajaqb(**kwargs):
run(seed='a ', ltm=['ajaqb'], ab=[ab1])
# no reln with j or q
def runab13_ajaqb(seed='a '):
run(seed, ltm=['ajaqb'], ab=[ab1, ab3])
def runabs3():
# abs painters with more LTM
run(seed='a ', ltm=['abcde', 'ggggg'], ab=[ab1])
# problem: many sames => many nonadjacent 'same' painters
# therefore 'aaaaa' wins
def runrel():
kw = dict(
seed='a ',
ltm=['abcde'],
ab=[ab1, ab2],
asteps=100,
rsteps=40,
exc=True
)
run(**kw)
def runab123(**kwargs):
kw = dict(ltm=['ajaqb'], ab=[ab1, ab2, ab3], asteps=100, exc=True) | kwargs
run(**kw)
# NEXT Try absolute & digraph painters with a big LTM to see if that creates
# a need for clarity.
def run_ad(**kwargs):
# make relative (digraph) painters: gets 'aqb ' a lot
kw = dict(
#ltm=['ajaqb', 'pqrst', 'abcde', 'aabbc'],
#ltm=['ajaqb', 'abcde', 'aabbc'],
#ltm=['ajaqb', 'abcde'],
ltm=['ajaqb'],
ab=[ab4],
asteps=30,
rsteps=25,
lla=0,
llr=1,
pun=False,
exc=True
) | kwargs
run(**kw)
def run_ad2(**kwargs):
# blends some simple strings
kw = dict(ltm=['ajaqb', 'abcde', 'aabbc']) | kwargs
run(**kw)
def r(d: dict, **kwargs):
'''Convenience function for running experiments in the REPL. Optionally
override a dict of arguments to run().'''
run(**(d | kwargs))
# A run just like the Hopfield net: many strings in memory, absolute painters
# only.
hoplike = dict(
ltm=['ajaqb', 'pqrst', 'abcde', 'aabbc', 'ponop'],
ab=[ab1],
asteps=40,
rsteps=100,
lla=0,
llr=2,
pun=False,
pcl=False,
exc=False
)
hoplike_long = hoplike | dict(
ltm=['abrsecpbqf', 'efghijklmn', 'tfserfdqgc', 'abcdepomnl'],
seed='abr e bqf'
)
hoplike_long_easy = hoplike | dict(
ltm=['aaaaabca', 'gggvvwgg', 'pqrspqrs'],
seed='aaaa a',
ab=[ab1a],
rsteps=30,
ccl=False
)
example1 = hoplike_long_easy | dict(
#seed='aa a',
seed=' aabb ',
rsteps=20, #50,
abr=False,
ccl=False,
rng=444834040015719226, #8066335492150159463,
llr=2
)
example2 = example1 | dict(
ccl=True,
rsteps=40,
)
rel0 = dict( # reconstructs with abs painters only, because seed is aligned
# right
ltm=['bbbbghij'],
seed='b g ',
ab=[ab1a],
asteps=150,
rsteps=50,
abr=False,
pcl=False,
llr=2
)
rel1 = rel0 | dict( # shows inability of abs painters to adjust to a shift (??)
seed='b gh ',
rsteps=50,
rng=620217766481971979
)
# PROBLEM This actually generates the desired analogy! bbbghijk
# We need an illustration of how absolute painters can't follow a shift.
rel2 = rel1 | dict( # relative painters: a bit nicer
ab=[ab4]
)
rel0a = rel0 | dict(
ltm=['bbbbghib'],
seed='b g '
)
rel1a = rel0a | dict(
seed='b g ',
)
rel2 | runabs2 | identifier_name |
run.py | 6,
llr=6,
seed='m '
)
run(**(d | kwargs))
def run_pons(**kwargs) -> None:
'''Runs the pons asinorum.'''
lo(0, "pons asinorum")
d = dict(
ltm=[],
asteps=0,
seed='abcabdijk ',
rsteps=200, #500,
#llr=4,
auto_annotate=[]
)
run(**(d | kwargs))
def h(*ids):
'''Plot history of painters with given ids.'''
global m
for i in ids:
#plt.plot(range(1, m.t + 1), m.history_of(i))
#plt.plot(*zip(m.history_of(i)))
hs = list(zip(*m.history_of(i)))
pts(hs)
print(len(hs))
plt.plot(*hs)
plt.show()
def as_lts(s: str) -> List[str]:
if not s:
return []
elif ',' not in s:
return [s]
else:
return s.split(',')
def parse_and_run() -> None:
global rngseed
parser = argparse.ArgumentParser()
parser.add_argument(
"-r",
"--rngseed",
help="random-number seed",
type=int
)
parser.add_argument("--ltm", help="the long-term soup", default='ajaqb')
parser.add_argument(
"seed",
help="the seed string",
default='a ',
nargs='?'
)
parser.add_argument(
"--asteps",
help="number of timesteps to absorb each LTS string",
type=int,
default=40
)
parser.add_argument(
"--rsteps",
help="number of timesteps to regenerate",
type=int,
default=60
)
parser.add_argument(
"--lla",
help="logging level during absorption",
type=int,
default=0
)
parser.add_argument(
"--llr",
help="logging level during regeneration",
type=int,
default=2
)
parser.add_argument(
"--au",
help="which set of cell attributes to auto-annotate",
type=int,
choices=[0, 1],
default=global_params.auto_annotations
)
args = parser.parse_args()
global_params.auto_annotations = args.au
run(
seed=args.seed,
ltm=as_lts(args.ltm),
asteps=args.asteps,
rsteps=args.rsteps,
lla=args.lla,
llr=args.llr,
rng=args.rngseed
)
lo(0, f'rngseed={rngseed}{newline}')
def set_rngseed(r: Optional[int]=None) -> None:
global rngseed
rngseed = reseed(r)
lo(0, f'rngseed={rngseed}{newline}')
def runabs1():
# abs painters only
run(seed='a ', ltm=['abcde'], ab=[ab1])
def runabs2():
# abs painters with a different seed
run(seed='m ', ltm=['abcde'], ab=[ab1])
def runabs_ajaqb(**kwargs):
run(seed='a ', ltm=['ajaqb'], ab=[ab1])
# no reln with j or q
def runab13_ajaqb(seed='a '):
run(seed, ltm=['ajaqb'], ab=[ab1, ab3])
def runabs3():
# abs painters with more LTM
run(seed='a ', ltm=['abcde', 'ggggg'], ab=[ab1])
# problem: many sames => many nonadjacent 'same' painters
# therefore 'aaaaa' wins
def runrel():
kw = dict(
seed='a ',
ltm=['abcde'],
ab=[ab1, ab2],
asteps=100,
rsteps=40,
exc=True
)
run(**kw)
def runab123(**kwargs):
kw = dict(ltm=['ajaqb'], ab=[ab1, ab2, ab3], asteps=100, exc=True) | kwargs
run(**kw)
# NEXT Try absolute & digraph painters with a big LTM to see if that creates
# a need for clarity.
def run_ad(**kwargs):
# make relative (digraph) painters: gets 'aqb ' a lot
kw = dict(
#ltm=['ajaqb', 'pqrst', 'abcde', 'aabbc'],
#ltm=['ajaqb', 'abcde', 'aabbc'],
#ltm=['ajaqb', 'abcde'],
ltm=['ajaqb'],
ab=[ab4],
asteps=30,
rsteps=25,
lla=0,
llr=1,
pun=False,
exc=True
) | kwargs
run(**kw)
def run_ad2(**kwargs):
# blends some simple strings
kw = dict(ltm=['ajaqb', 'abcde', 'aabbc']) | kwargs
run(**kw)
def r(d: dict, **kwargs):
'''Convenience function for running experiments in the REPL. Optionally
override a dict of arguments to run().'''
run(**(d | kwargs))
# A run just like the Hopfield net: many strings in memory, absolute painters
# only.
hoplike = dict(
ltm=['ajaqb', 'pqrst', 'abcde', 'aabbc', 'ponop'],
ab=[ab1],
asteps=40,
rsteps=100,
lla=0,
llr=2,
pun=False,
pcl=False,
exc=False
)
hoplike_long = hoplike | dict(
ltm=['abrsecpbqf', 'efghijklmn', 'tfserfdqgc', 'abcdepomnl'],
seed='abr e bqf'
)
hoplike_long_easy = hoplike | dict(
ltm=['aaaaabca', 'gggvvwgg', 'pqrspqrs'],
seed='aaaa a',
ab=[ab1a],
rsteps=30,
ccl=False
)
example1 = hoplike_long_easy | dict(
#seed='aa a',
seed=' aabb ',
rsteps=20, #50,
abr=False,
ccl=False,
rng=444834040015719226, #8066335492150159463,
llr=2
)
example2 = example1 | dict(
ccl=True,
rsteps=40,
)
rel0 = dict( # reconstructs with abs painters only, because seed is aligned
# right
ltm=['bbbbghij'],
seed='b g ',
ab=[ab1a],
asteps=150,
rsteps=50,
abr=False,
pcl=False,
llr=2
)
rel1 = rel0 | dict( # shows inability of abs painters to adjust to a shift (??)
seed='b gh ',
rsteps=50,
rng=620217766481971979
)
# PROBLEM This actually generates the desired analogy! bbbghijk
# We need an illustration of how absolute painters can't follow a shift.
rel2 = rel1 | dict( # relative painters: a bit nicer
ab=[ab4]
)
rel0a = rel0 | dict(
ltm=['bbbbghib'],
seed='b g '
)
rel1a = rel0a | dict(
seed='b g ',
)
rel2a = rel1a | dict(
ab=[ab4],
rsteps=120,
rng=682026381905476632
)
rel3a = rel2a | dict(
seed='c s '
)
# Can we solve 'ajaqb' without relative indirect painters?
quest1 = dict(
ltm=['ajaqb'],
seed='a ',
asteps=40,
rsteps=40,
ab=[ab1, ab3],
pun=False,
exc=True
)
# No.
# seed='a a ' also fails, because without absolute or relative indirect
# painters, the LTS has no record of the a_a and a_b relationships.
# A relative indirect painter can recreate that relationship wherever it
# sees an 'a' or 'b'.
# Does adding relative indirect solve that problem?
quest2 = quest1 | dict(
ab=[ab1, ab2, ab3]
)
# Does adding relative painters get hoplike_long to regenerate the memories
# reliably?
quest3 = hoplike_long | dict(
ab=[ab1, ab4]
)
# Is clarity needed to settle down on an attractor?
quest4 = hoplike_long | dict(
ccl=False
)
cdecb = dict(
seed=' e ',
ltm=['cdecb'], | ab=[ab1a],
asteps=30, | random_line_split |
|
rfc7539_test.go | , 0x9e, 0x96, 0xd6,
0x47, 0xb7, 0xc3, 0x9f, 0x56, 0xe0, 0x31, 0xca, 0x5e, 0xb6, 0x25, 0x0d,
0x40, 0x42, 0xe0, 0x27, 0x85, 0xec, 0xec, 0xfa, 0x4b, 0x4b, 0xb5, 0xe8,
0xea, 0xd0, 0x44, 0x0e, 0x20, 0xb6, 0xe8, 0xdb, 0x09, 0xd8, 0x81, 0xa7,
0xc6, 0x13, 0x2f, 0x42, 0x0e, 0x52, 0x79, 0x50, 0x42, 0xbd, 0xfa, 0x77,
0x73, 0xd8, 0xa9, 0x05, 0x14, 0x47, 0xb3, 0x29, 0x1c, 0xe1, 0x41, 0x1c,
0x68, 0x04, 0x65, 0x55, 0x2a, 0xa6, 0xc4, 0x05, 0xb7, 0x76, 0x4d, 0x5e,
0x87, 0xbe, 0xa8, 0x5a, 0xd0, 0x0f, 0x84, 0x49, 0xed, 0x8f, 0x72, 0xd0,
0xd6, 0x62, 0xab, 0x05, 0x26, 0x91, 0xca, 0x66, 0x42, 0x4b, 0xc8, 0x6d,
0x2d, 0xf8, 0x0e, 0xa4, 0x1f, 0x43, 0xab, 0xf9, 0x37, 0xd3, 0x25, 0x9d,
0xc4, 0xb2, 0xd0, 0xdf, 0xb4, 0x8a, 0x6c, 0x91, 0x39, 0xdd, 0xd7, 0xf7,
0x69, 0x66, 0xe9, 0x28, 0xe6, 0x35, 0x55, 0x3b, 0xa7, 0x6c, 0x5c, 0x87,
0x9d, 0x7b, 0x35, 0xd4, 0x9e, 0xb2, 0xe6, 0x2b, 0x08, 0x71, 0xcd, 0xac,
0x63, 0x89, 0x39, 0xe2, 0x5e, 0x8a, 0x1e, 0x0e, 0xf9, 0xd5, 0x28, 0x0f,
0xa8, 0xca, 0x32, 0x8b, 0x35, 0x1c, 0x3c, 0x76, 0x59, 0x89, 0xcb, 0xcf,
0x3d, 0xaa, 0x8b, 0x6c, 0xcc, 0x3a, 0xaf, 0x9f, 0x39, 0x79, 0xc9, 0x2b,
0x37, 0x20, 0xfc, 0x88, 0xdc, 0x95, 0xed, 0x84, 0xa1, 0xbe, 0x05, 0x9c,
0x64, 0x99, 0xb9, 0xfd, 0xa2, 0x36, 0xe7, 0xe8, 0x18, 0xb0, 0x4b, 0x0b,
0xc3, 0x9c, 0x1e, 0x87, 0x6b, 0x19, 0x3b, 0xfe, 0x55, 0x69, 0x75, 0x3f,
0x88, 0x12, 0x8c, 0xc0, 0x8a, 0xaa, 0x9b, 0x63, 0xd1, 0xa1, 0x6f, 0x80,
0xef, 0x25, 0x54, 0xd7, 0x18, 0x9c, 0x41, 0x1f, 0x58, 0x69, 0xca, 0x52,
0xc5, 0xb8, 0x3f, 0xa3, 0x6f, 0xf2, 0x16, 0xb9, 0xc1, 0xd3, 0x00, 0x62,
0xbe, 0xbc, 0xfd, 0x2d, 0xc5, 0xbc, 0xe0, 0x91, 0x19, 0x34, 0xfd, 0xa7,
0x9a, 0x86, 0xf6, 0xe6, 0x98, 0xce, 0xd7, 0x59, 0xc3, 0xff, 0x9b, 0x64,
0x77, 0x33, 0x8f, 0x3d, 0xa4, 0xf9, 0xcd, 0x85, 0x14, 0xea, 0x99, 0x82,
0xcc, 0xaf, 0xb3, 0x41, 0xb2, 0x38, 0x4d, 0xd9, 0x02, 0xf3, 0xd1, 0xab,
0x7a, 0xc6, 0x1d, 0xd2, 0x9c, 0x6f, 0x21, 0xba, 0x5b, 0x86, 0x2f, 0x37,
0x30, 0xe3, 0x7c, 0xfd, 0xc4, 0xfd, 0x80, 0x6c, 0x22, 0xf2, 0x21}},
{"'Twas brillig, and the slithy toves\nDid gyre and gimble in the " + | "wabe:\nAll mimsy were the borogoves,\nAnd the mome raths outgrabe.",
[32]uint8{0x1c, 0x92, 0x40, 0xa5, 0xeb, 0x55, 0xd3, 0x8a, 0xf3, 0x33,
0x88, 0x86, 0x04, 0xf6, 0xb5, 0xf0, 0x47, 0x39, 0x17, 0xc1, 0x40,
0x2b, 0x80, 0x09, 0x9d, 0xca, 0x5c, 0xbc, 0x20, 0x70, 0x75, 0xc0}, | random_line_split |
|
rfc7539_test.go | x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00}, uint32(2), [12]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, []byte{0x72, 0xd5, 0x4d, 0xfb,
0xf1, 0x2e, 0xc4, 0x4b, 0x36, 0x26, 0x92, 0xdf, 0x94, 0x13, 0x7f, 0x32,
0x8f, 0xea, 0x8d, 0xa7, 0x39, 0x90, 0x26, 0x5e, 0xc1, 0xbb, 0xbe, 0xa1,
0xae, 0x9a, 0xf0, 0xca, 0x13, 0xb2, 0x5a, 0xa2, 0x6c, 0xb4, 0xa6, 0x48,
0xcb, 0x9b, 0x9d, 0x1b, 0xe6, 0x5b, 0x2c, 0x09, 0x24, 0xa6, 0x6c, 0x54,
0xd5, 0x45, 0xec, 0x1b, 0x73, 0x74, 0xf4, 0x87, 0x2e, 0x99, 0xf0, 0x96}},
{[32]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00}, uint32(0), [12]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x02}, []byte{0xc2, 0xc6, 0x4d, 0x37,
0x8c, 0xd5, 0x36, 0x37, 0x4a, 0xe2, 0x04, 0xb9, 0xef, 0x93, 0x3f, 0xcd,
0x1a, 0x8b, 0x22, 0x88, 0xb3, 0xdf, 0xa4, 0x96, 0x72, 0xab, 0x76, 0x5b,
0x54, 0xee, 0x27, 0xc7, 0x8a, 0x97, 0x0e, 0x0e, 0x95, 0x5c, 0x14, 0xf3,
0xa8, 0x8e, 0x74, 0x1b, 0x97, 0xc2, 0x86, 0xf7, 0x5f, 0x8f, 0xc2, 0x99,
0xe8, 0x14, 0x83, 0x62, 0xfa, 0x19, 0x8a, 0x39, 0x53, 0x1b, 0xed, 0x6d}}}
for _, test := range testCases {
cipher := rfc7539.ChaCha20{test.key, test.counter, test.nonce, plaintext}
encrypt := rfc7539.Encrypt(&cipher)
if !bytes.Equal(encrypt, test.ciphertext) {
t.Errorf("key: % x\n nonce: % x\n block counter: %d\n "+
"keystream: % x\nexpected: % x", test.key, test.nonce, test.counter,
encrypt, test.ciphertext)
}
}
}
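// Because ChaCha20 encryption is a plain XOR with the keystream, running
// Encrypt over the ciphertext again with the same key, counter, and nonce
// recovers the plaintext. A minimal sketch, assuming the same rfc7539 API
// used above:
//
//	c := rfc7539.ChaCha20{key, counter, nonce, ciphertext}
//	plain := rfc7539.Encrypt(&c)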
func TestChaCha20Encryption(t *testing.T) | {
type testCase struct {
plaintext string
key [32]uint8
counter uint32
nonce [12]uint8
ciphertext []byte
}
testCases := [...]testCase{{"Ladies and Gentlemen of the class of '99: " +
"If I could offer you only one tip for the future, sunscreen would be it.",
[32]uint8{0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11,
0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d,
0x1e, 0x1f}, uint32(1), [12]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x4a, 0x00, 0x00, 0x00, 0x00}, []byte{0x6e, 0x2e, 0x35, 0x9a, 0x25,
0x68, 0xf9, 0x80, 0x41, 0xba, 0x07, 0x28, 0xdd, 0x0d, 0x69, 0x81, 0xe9,
0x7e, 0x7a, 0xec, 0x1d, 0x43, 0x60, 0xc2, 0x0a, 0x27, 0xaf, 0xcc, 0xfd,
0x9f, 0xae, 0x0b, 0xf9, 0x1b, 0x65, 0xc5, 0x52, 0x47, 0x33, 0xab, 0x8f,
0x59, 0x3d, 0xab, 0xcd, 0x62, 0xb3, 0x57, 0x16, 0x39, 0xd6, 0x24, 0xe6, | identifier_body |
|
rfc7539_test.go | 0x81, 0x60, 0xb8, 0x22, 0x84, 0xf3, 0xc9,
0x49, 0xaa, 0x5a, 0x8e, 0xca, 0x00, 0xbb, 0xb4, 0xa7, 0x3b, 0xda, 0xd1,
0x92, 0xb5, 0xc4, 0x2f, 0x73, 0xf2, 0xfd, 0x4e, 0x27, 0x36, 0x44, 0xc8,
0xb3, 0x61, 0x25, 0xa6, 0x4a, 0xdd, 0xeb, 0x00, 0x6c, 0x13, 0xa0}},
{[32]uint8{0x00, 0xff, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00}, uint32(2), [12]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, []byte{0x72, 0xd5, 0x4d, 0xfb,
0xf1, 0x2e, 0xc4, 0x4b, 0x36, 0x26, 0x92, 0xdf, 0x94, 0x13, 0x7f, 0x32,
0x8f, 0xea, 0x8d, 0xa7, 0x39, 0x90, 0x26, 0x5e, 0xc1, 0xbb, 0xbe, 0xa1,
0xae, 0x9a, 0xf0, 0xca, 0x13, 0xb2, 0x5a, 0xa2, 0x6c, 0xb4, 0xa6, 0x48,
0xcb, 0x9b, 0x9d, 0x1b, 0xe6, 0x5b, 0x2c, 0x09, 0x24, 0xa6, 0x6c, 0x54,
0xd5, 0x45, 0xec, 0x1b, 0x73, 0x74, 0xf4, 0x87, 0x2e, 0x99, 0xf0, 0x96}},
{[32]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00}, uint32(0), [12]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x02}, []byte{0xc2, 0xc6, 0x4d, 0x37,
0x8c, 0xd5, 0x36, 0x37, 0x4a, 0xe2, 0x04, 0xb9, 0xef, 0x93, 0x3f, 0xcd,
0x1a, 0x8b, 0x22, 0x88, 0xb3, 0xdf, 0xa4, 0x96, 0x72, 0xab, 0x76, 0x5b,
0x54, 0xee, 0x27, 0xc7, 0x8a, 0x97, 0x0e, 0x0e, 0x95, 0x5c, 0x14, 0xf3,
0xa8, 0x8e, 0x74, 0x1b, 0x97, 0xc2, 0x86, 0xf7, 0x5f, 0x8f, 0xc2, 0x99,
0xe8, 0x14, 0x83, 0x62, 0xfa, 0x19, 0x8a, 0x39, 0x53, 0x1b, 0xed, 0x6d}}}
for _, test := range testCases {
cipher := rfc7539.ChaCha20{test.key, test.counter, test.nonce, plaintext}
encrypt := rfc7539.Encrypt(&cipher)
if !bytes.Equal(encrypt, test.ciphertext) |
}
}
func TestChaCha20Encryption(t *testing.T) {
type testCase struct {
plaintext string
key [32]uint8
counter uint32
nonce [12]uint8
ciphertext []byte
}
testCases := [...]testCase{{"Ladies and Gentlemen of the class of '99: " +
"If I could offer you only one tip for the future, sunscreen would be it.",
[32]uint8{0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11,
0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d,
0x1e, 0x1f}, uint32(1), [12]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x4a, 0x00, 0x00, 0x00, 0x00}, []byte{0x6e, 0 | {
t.Errorf("key: % x\n nonce: % x\n block counter: %d\n "+
"keystream: % x\nexpected: % x", test.key, test.nonce, test.counter,
encrypt, test.ciphertext)
} | conditional_block |
rfc7539_test.go | 0x81, 0x60, 0xb8, 0x22, 0x84, 0xf3, 0xc9,
0x49, 0xaa, 0x5a, 0x8e, 0xca, 0x00, 0xbb, 0xb4, 0xa7, 0x3b, 0xda, 0xd1,
0x92, 0xb5, 0xc4, 0x2f, 0x73, 0xf2, 0xfd, 0x4e, 0x27, 0x36, 0x44, 0xc8,
0xb3, 0x61, 0x25, 0xa6, 0x4a, 0xdd, 0xeb, 0x00, 0x6c, 0x13, 0xa0}},
{[32]uint8{0x00, 0xff, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00}, uint32(2), [12]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, []byte{0x72, 0xd5, 0x4d, 0xfb,
0xf1, 0x2e, 0xc4, 0x4b, 0x36, 0x26, 0x92, 0xdf, 0x94, 0x13, 0x7f, 0x32,
0x8f, 0xea, 0x8d, 0xa7, 0x39, 0x90, 0x26, 0x5e, 0xc1, 0xbb, 0xbe, 0xa1,
0xae, 0x9a, 0xf0, 0xca, 0x13, 0xb2, 0x5a, 0xa2, 0x6c, 0xb4, 0xa6, 0x48,
0xcb, 0x9b, 0x9d, 0x1b, 0xe6, 0x5b, 0x2c, 0x09, 0x24, 0xa6, 0x6c, 0x54,
0xd5, 0x45, 0xec, 0x1b, 0x73, 0x74, 0xf4, 0x87, 0x2e, 0x99, 0xf0, 0x96}},
{[32]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00}, uint32(0), [12]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x02}, []byte{0xc2, 0xc6, 0x4d, 0x37,
0x8c, 0xd5, 0x36, 0x37, 0x4a, 0xe2, 0x04, 0xb9, 0xef, 0x93, 0x3f, 0xcd,
0x1a, 0x8b, 0x22, 0x88, 0xb3, 0xdf, 0xa4, 0x96, 0x72, 0xab, 0x76, 0x5b,
0x54, 0xee, 0x27, 0xc7, 0x8a, 0x97, 0x0e, 0x0e, 0x95, 0x5c, 0x14, 0xf3,
0xa8, 0x8e, 0x74, 0x1b, 0x97, 0xc2, 0x86, 0xf7, 0x5f, 0x8f, 0xc2, 0x99,
0xe8, 0x14, 0x83, 0x62, 0xfa, 0x19, 0x8a, 0x39, 0x53, 0x1b, 0xed, 0x6d}}}
for _, test := range testCases {
cipher := rfc7539.ChaCha20{test.key, test.counter, test.nonce, plaintext}
encrypt := rfc7539.Encrypt(&cipher)
if !bytes.Equal(encrypt, test.ciphertext) {
t.Errorf("key: % x\n nonce: % x\n block counter: %d\n "+
"keystream: % x\nexpected: % x", test.key, test.nonce, test.counter,
encrypt, test.ciphertext)
}
}
}
func | (t *testing.T) {
type testCase struct {
plaintext string
key [32]uint8
counter uint32
nonce [12]uint8
ciphertext []byte
}
testCases := [...]testCase{{"Ladies and Gentlemen of the class of '99: " +
"If I could offer you only one tip for the future, sunscreen would be it.",
[32]uint8{0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11,
0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d,
0x1e, 0x1f}, uint32(1), [12]uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x4a, 0x00, 0x00, 0x00, 0x00}, []byte{0x6e, 0x | TestChaCha20Encryption | identifier_name |
stream.rs | ],
}
impl ChaChaKnN {
/// Initialize an IETF ChaCha20 stream cipher with our key material
/// and use it to generate the poly1305 key for our MAC gamma, and
/// the packet's name for SURB unwinding.
///
/// Notes: We could improve performance by using the curve25519 point
/// derived in the key exchange directly as the key for an XChaCha20
/// instance, which includes some mixing, and using chacha for the
/// replay code and gamma key. We decided to use SHA3's SHAKE256
/// mode so that we have more and different mixing.
pub fn header_cipher<P: Params>(&self) -> SphinxResult<HeaderCipher<P>> {
let mut chacha = ChaCha20::new_ietf(&self.key, &self.nonce);
let r = &mut [0u8; HOP_EATS];
chacha.xor_read(r).unwrap(); // No KeystreamError::EndReached here.
let (packet_name,replay_code,gamma_key) = array_refs![r,16,16,32];
Ok( HeaderCipher {
params: PhantomData,
chunks: StreamChunks::make::<P>() ?,
packet_name: PacketName(*packet_name),
replay_code: ReplayCode(*replay_code),
gamma_key: GammaKey(*gamma_key),
stream: chacha,
} )
}
}
/// Results of our KDF consisting of the nonce and key for our
/// IETF Chacha20 stream cipher, which produces everything else
/// in the Sphinx header.
#[derive(Clone)]
pub struct SphinxKey<P: Params> {
pub params: PhantomData<P>,
/// IETF Chacha20 stream cipher key and nonce.
pub chacha: ChaChaKnN,
}
/*
impl<P> Clone for SphinxKey<P> where P: Params {
fn clone(&self) -> SphinxKey<P> {
SphinxKey {
params: PhantomData,
chacha: chacha.clone(),
}
}
}
*/
impl<P: Params> SphinxKey<P> {
/// Derive the key material for our IETF Chacha20 stream cipher,
/// incorporating both `P::PROTOCOL_NAME` and our `RoutingName`
/// as seed material.
pub fn new_kdf(ss: &SphinxSecret, rn: &::keys::RoutingName) -> SphinxKey<P> {
use crypto::digest::Digest;
use crypto::sha3::Sha3;
let r = &mut [0u8; 32+16]; // ClearOnDrop
let mut sha = Sha3::shake_256();
sha.input(&ss.0);
sha.input_str( "Sphinx" );
sha.input(&rn.0);
sha.input_str( P::PROTOCOL_NAME );
sha.input(&ss.0);
sha.result(r);
sha.reset();
let (nonce,_,key) = array_refs![r,12,4,32];
SphinxKey {
params: PhantomData,
chacha: ChaChaKnN { nonce: *nonce, key: *key },
}
}
/// Initialize our IETF ChaCha20 stream cipher by invoking
/// `ChaChaKnN::header_cipher` with our parameters `P: Params`.
pub fn header_cipher(&self) -> SphinxResult<HeaderCipher<P>> {
self.chacha.header_cipher::<P>()
}
}
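// A minimal usage sketch for one hop, assuming a concrete `P: Params`
// and a shared secret `ss` with routing name `rn` from the key exchange:
//
//     let key = SphinxKey::<P>::new_kdf(&ss, &rn);
//     let mut hop = key.header_cipher()?;
//     hop.verify_gamma(beta, &gamma)?;
//     let lioness_key = hop.lioness_key();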
/// Amount of key stream consumed by `hop()` itself
const HOP_EATS : usize = 64;
/// Allocation of cipher ranges for the IETF ChaCha20 inside
/// `HeaderCipher` to various keys and stream cipher roles needed
/// to process a header.
struct StreamChunks {
beta: Range<usize>,
beta_tail: Range<usize>,
surb_log: Range<usize>,
lioness_key: Range<usize>,
blinding: Range<usize>,
delay: Range<usize>,
}
impl StreamChunks {
#[inline]
fn make<P: Params>() -> SphinxResult<StreamChunks> {
let mut offset = HOP_EATS; //
let chunks = {
let mut reserve = |l: usize, block: bool| -> Range<usize> {
if block { offset += 64 - offset % 64; }
let previous = offset;
offset += l;
let r = previous..offset;
debug_assert_eq!(r.len(), l);
r
};
StreamChunks {
beta: reserve(P::BETA_LENGTH as usize,true),
beta_tail: reserve(P::MAX_BETA_TAIL_LENGTH as usize,false),
surb_log: reserve(P::SURB_LOG_LENGTH as usize,true),
lioness_key: reserve(BODY_CIPHER_KEY_SIZE,true),
blinding: reserve(64,true),
delay: reserve(64,true), // Actually 32
}
}; // let chunks
// We check that the maximum key stream length is not exceeded
// here so that calls to both `seek_to` and `xor_read` can
// safely be `.unwrap()`ed, thereby avoiding `-> SphinxResult<_>`
// everywhere.
if offset > (1usize << 38) {
Err( SphinxError::InternalError("Parameters exceed IETF ChaCha20 stream!") )
} else { Ok(chunks) }
}
}
/// Symmetric cryptography for a single Sphinx sub-hop, usually
/// meaning the whole hop.
///
pub struct HeaderCipher<P: Params> {
params: PhantomData<P>,
/// IETF ChaCha20 stream cipher used when processing the header
stream: ChaCha20,
/// Stream cipher ranges determined by `params`
chunks: StreamChunks,
/// Replay code for replay protection
replay_code: ReplayCode,
/// The packet's name for SURB unwinding
packet_name: PacketName,
/// Sphinx poly1305 MAC key
gamma_key: GammaKey,
}
// Declare a `HeaderCipher` initialized after `ClearOnDrop` zeros it so
// that it may be dropped normally. Requires that `Drop::drop` does
// nothing interesting.
impl<P: Params> ::clear_on_drop::clear::InitializableFromZeroed for HeaderCipher<P> {
unsafe fn initialize(_: *mut HeaderCipher<P>) {
}
}
// We implement `Drop::drop` so that `HeaderCipher` cannot be copy.
// `InitializableFromZeroed::initialize` leaves it invalid, so
// `Drop::drop` must not do anything interesting.
impl<P: Params> Drop for HeaderCipher<P> {
fn drop(&mut self) { }
}
impl<P: Params> fmt::Debug for HeaderCipher<P> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "HeaderCipher {{ {:?}, .. }}", self.replay_code.error_packet_id())
}
}
impl<P: Params> HeaderCipher<P> {
// TODO: Can we abstract the lengths checks? Operate on a pair
// `(LayoutRefs,HeaderCipher)` perhaps?
/// Compute the poly1305 MAC `Gamma` using the key found in a Sphinx key exchange.
///
/// Does not verify the lengths of Beta or the SURB.
pub fn | (&self, beta: &[u8]) -> SphinxResult<Gamma> {
if beta.len() != P::BETA_LENGTH as usize {
return Err( SphinxError::InternalError("Beta has the incorrect length for MAC!") );
}
// According to the current API gamma_out lies in a buffer supplied
// by our caller, so no need for games to zero it here.
let mut gamma_out: Gamma = Default::default();
let mut poly = Poly1305::new(&self.gamma_key.0);
// let mut poly = ClearOnDrop::new(&mut poly);
poly.input(beta);
poly.raw_result(&mut gamma_out.0);
poly.reset();
Ok(gamma_out)
}
/// Verify the poly1305 MAC `Gamma` given in a Sphinx packet.
///
/// Returns an InvalidMac error if the check fails. Does not
/// verify the lengths of Beta or the SURB.
pub fn verify_gamma(&self, beta: &[u8], gamma_given: &Gamma)
-> SphinxResult<()> {
let gamma_found = self.create_gamma(beta) ?; // InternalError
// TODO: let gamma_found = ClearOnDrop::new(&gamma_found);
if ! ::consistenttime::ct_u8_slice_eq(&gamma_given.0, &gamma_found.0) {
Err( SphinxError::InvalidMac(self.replay_code.error_packet_id()) )
} else { Ok(()) }
}
/// Checks for packet replays using the supplied `ReplayChecker`.
///
/// Replay protection requires that `ReplayChecker::replay_check`
/// returns `Err( SphinxError::Replay(hop.replay_code) )` when a
/// replay occurs.
///
/// You may however use `IgnoreReplay` as the `ReplayChecker` for
/// ratchet sub-hops and for all subhops in packet creation.
pub fn replay_check<RC: ReplayChecker>(&self, replayer: RC) -> | create_gamma | identifier_name |
stream.rs | (r);
sha.reset();
let (nonce,_,key) = array_refs![r,12,4,32];
SphinxKey {
params: PhantomData,
chacha: ChaChaKnN { nonce: *nonce, key: *key },
}
}
/// Initialize our IETF ChaCha20 stream cipher by invoking
/// `ChaChaKnN::header_cipher` with our parameters `P: Params`.
pub fn header_cipher(&self) -> SphinxResult<HeaderCipher<P>> {
self.chacha.header_cipher::<P>()
}
}
/// Amount of key stream consumed by `hop()` itself
const HOP_EATS : usize = 64;
/// Allocation of cipher ranges for the IETF ChaCha20 inside
/// `HeaderCipher` to various keys and stream cipher roles needed
/// to process a header.
struct StreamChunks {
beta: Range<usize>,
beta_tail: Range<usize>,
surb_log: Range<usize>,
lioness_key: Range<usize>,
blinding: Range<usize>,
delay: Range<usize>,
}
impl StreamChunks {
#[inline]
fn make<P: Params>() -> SphinxResult<StreamChunks> {
let mut offset = HOP_EATS; //
let chunks = {
let mut reserve = |l: usize, block: bool| -> Range<usize> {
if block { offset += 64 - offset % 64; }
let previous = offset;
offset += l;
let r = previous..offset;
debug_assert_eq!(r.len(), l);
r
};
StreamChunks {
beta: reserve(P::BETA_LENGTH as usize,true),
beta_tail: reserve(P::MAX_BETA_TAIL_LENGTH as usize,false),
surb_log: reserve(P::SURB_LOG_LENGTH as usize,true),
lioness_key: reserve(BODY_CIPHER_KEY_SIZE,true),
blinding: reserve(64,true),
delay: reserve(64,true), // Actually 32
}
}; // let chunks
// We check that the maximum key stream length is not exceeded
// here so that calls to both `seek_to` and `xor_read` can
// safely be `.unwrap()`ed, thereby avoiding `-> SphinxResult<_>`
// everywhere.
if offset > (1usize << 38) {
Err( SphinxError::InternalError("Parameters exceed IETF ChaCha20 stream!") )
} else { Ok(chunks) }
}
}
/// Symmetric cryptography for a single Sphinx sub-hop, usually
/// meaning the whole hop.
///
pub struct HeaderCipher<P: Params> {
params: PhantomData<P>,
/// IETF ChaCha20 stream cipher used when processing the header
stream: ChaCha20,
/// Stream cipher ranges determined by `params`
chunks: StreamChunks,
/// Replay code for replay protection
replay_code: ReplayCode,
/// The packet's name for SURB unwinding
packet_name: PacketName,
/// Sphinx poly1305 MAC key
gamma_key: GammaKey,
}
// Declare a `HeaderCipher` initialized after `ClearOnDrop` zeros it so
// that it may be dropped normally. Requires that `Drop::drop` does
// nothing interesting.
impl<P: Params> ::clear_on_drop::clear::InitializableFromZeroed for HeaderCipher<P> {
unsafe fn initialize(_: *mut HeaderCipher<P>) {
}
}
// We implement `Drop::drop` so that `HeaderCipher` cannot be copy.
// `InitializableFromZeroed::initialize` leaves it invalid, so
// `Drop::drop` must not do anything interesting.
impl<P: Params> Drop for HeaderCipher<P> {
fn drop(&mut self) { }
}
impl<P: Params> fmt::Debug for HeaderCipher<P> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "HeaderCipher {{ {:?}, .. }}", self.replay_code.error_packet_id())
}
}
impl<P: Params> HeaderCipher<P> {
// TODO: Can we abstract the lengths checks? Operate on a pair
// `(LayoutRefs,HeaderCipher)` perhaps?
/// Compute the poly1305 MAC `Gamma` using the key found in a Sphinx key exchange.
///
/// Does not verify the lengths of Beta or the SURB.
pub fn create_gamma(&self, beta: &[u8]) -> SphinxResult<Gamma> {
if beta.len() != P::BETA_LENGTH as usize {
return Err( SphinxError::InternalError("Beta has the incorrect length for MAC!") );
}
// According to the current API gamma_out lies in a buffer supplied
// by our caller, so no need for games to zero it here.
let mut gamma_out: Gamma = Default::default();
let mut poly = Poly1305::new(&self.gamma_key.0);
// let mut poly = ClearOnDrop::new(&mut poly);
poly.input(beta);
poly.raw_result(&mut gamma_out.0);
poly.reset();
Ok(gamma_out)
}
/// Verify the poly1305 MAC `Gamma` given in a Sphinx packet.
///
/// Returns an InvalidMac error if the check fails. Does not
/// verify the lengths of Beta or the SURB.
pub fn verify_gamma(&self, beta: &[u8], gamma_given: &Gamma)
-> SphinxResult<()> {
let gamma_found = self.create_gamma(beta) ?; // InternalError
// TODO: let gamma_found = ClearOnDrop::new(&gamma_found);
if ! ::consistenttime::ct_u8_slice_eq(&gamma_given.0, &gamma_found.0) {
Err( SphinxError::InvalidMac(self.replay_code.error_packet_id()) )
} else { Ok(()) }
}
/// Checks for packet replays using the supplied `ReplayChecker`.
///
/// Replay protection requires that `ReplayChecker::replay_check`
/// returns `Err( SphinxError::Replay(hop.replay_code) )` when a
/// replay occurs.
///
/// You may however use `IgnoreReplay` as the `ReplayChecker` for
/// ratchet sub-hops and for all subhops in packet creation.
pub fn replay_check<RC: ReplayChecker>(&self, replayer: RC) -> SphinxResult<()> {
replayer.replay_check(&self.replay_code)
}
/// Returns full key schedule for the lioness cipher for the body.
pub fn lioness_key(&mut self) -> [u8; BODY_CIPHER_KEY_SIZE] {
let lioness_key = &mut [0u8; BODY_CIPHER_KEY_SIZE];
self.stream.seek_to(self.chunks.lioness_key.start as u64).unwrap();
self.stream.xor_read(lioness_key).unwrap();
*lioness_key
}
pub fn body_cipher(&mut self) -> BodyCipher<P> {
BodyCipher {
params: PhantomData,
cipher: ::lioness::LionessDefault::new_raw(& self.lioness_key())
}
}
/// Returns the curve25519 scalar for blinding alpha in Sphinx.
pub fn blinding(&mut self) -> ::curve::Scalar {
let b = &mut [0u8; 64];
self.stream.seek_to(self.chunks.blinding.start as u64).unwrap();
self.stream.xor_read(b).unwrap();
::curve::Scalar::make(b)
}
/// Returns our name for the packet for insertion into the SURB log
/// if the packet gets reforwarded.
pub fn packet_name(&mut self) -> &PacketName {
&self.packet_name
}
pub fn xor_beta(&mut self, beta: &mut [u8], offset: usize, tail: usize)
-> SphinxResult<()> {
let len = P::BETA_LENGTH as usize - offset;
if beta.len() < len {
return Err( SphinxError::InternalError("Beta too short to encrypt!") );
}
if tail > P::MAX_BETA_TAIL_LENGTH as usize {
return Err( SphinxError::InternalError("Excessive tail length requested!") );
}
if beta.len() > len+tail {
return Err( SphinxError::InternalError("Beta too long to encrypt!") );
}
self.stream.seek_to((self.chunks.beta.start + offset) as u64).unwrap();
self.stream.xor_read(beta).unwrap();
Ok(())
}
pub fn set_beta_tail(&mut self, beta_tail: &mut [u8]) -> SphinxResult<()> {
if beta_tail.len() > P::MAX_BETA_TAIL_LENGTH as usize {
return Err( SphinxError::InternalError("Beta's tail is too long!") );
}
for i in beta_tail.iter_mut() { *i = 0; }
self.stream.seek_to(self.chunks.beta_tail.start as u64).unwrap();
self.stream.xor_read(beta_tail).unwrap();
Ok(())
}
pub fn xor_surb_log(&mut self, surb_log: &mut [u8]) -> SphinxResult<()> {
if surb_log.len() > P::SURB_LOG_LENGTH as usize | {
return Err( SphinxError::InternalError("SURB log too long!") );
} | conditional_block |
|
stream.rs | ],
}
impl ChaChaKnN {
/// Initialize an IETF ChaCha20 stream cipher with our key material
/// and use it to generate the poly1305 key for our MAC gamma, and
/// the packet's name for SURB unwinding.
///
/// Notes: We could improve performance by using the curve25519 point
/// derived in the key exchange directly as the key for an XChaCha20
/// instance, which includes some mixing, and using chacha for the
/// replay code and gamma key. We decided to use SHA3's SHAKE256
/// mode so that we have more and different mixing.
pub fn header_cipher<P: Params>(&self) -> SphinxResult<HeaderCipher<P>> {
let mut chacha = ChaCha20::new_ietf(&self.key, &self.nonce);
let r = &mut [0u8; HOP_EATS];
chacha.xor_read(r).unwrap(); // No KeystreamError::EndReached here.
let (packet_name,replay_code,gamma_key) = array_refs![r,16,16,32];
Ok( HeaderCipher {
params: PhantomData,
chunks: StreamChunks::make::<P>() ?,
packet_name: PacketName(*packet_name),
replay_code: ReplayCode(*replay_code),
gamma_key: GammaKey(*gamma_key),
stream: chacha,
} )
}
}
/// Results of our KDF consisting of the nonce and key for our
/// IETF Chacha20 stream cipher, which produces everything else
/// in the Sphinx header.
#[derive(Clone)]
pub struct SphinxKey<P: Params> {
pub params: PhantomData<P>,
/// IETF Chacha20 stream cipher key and nonce.
pub chacha: ChaChaKnN,
}
/*
impl<P> Clone for SphinxKey<P> where P: Params {
fn clone(&self) -> SphinxKey<P> {
SphinxKey {
params: PhantomData,
chacha: chacha.clone(),
}
}
}
*/
impl<P: Params> SphinxKey<P> {
/// Derive the key material for our IETF Chacha20 stream cipher,
/// incorporating both `P::PROTOCOL_NAME` and our `RoutingName`
/// as seed material.
pub fn new_kdf(ss: &SphinxSecret, rn: &::keys::RoutingName) -> SphinxKey<P> {
use crypto::digest::Digest;
use crypto::sha3::Sha3;
let r = &mut [0u8; 32+16]; // ClearOnDrop
let mut sha = Sha3::shake_256();
sha.input(&ss.0);
sha.input_str( "Sphinx" );
sha.input(&rn.0);
sha.input_str( P::PROTOCOL_NAME );
sha.input(&ss.0);
sha.result(r);
sha.reset();
let (nonce,_,key) = array_refs![r,12,4,32];
SphinxKey {
params: PhantomData,
chacha: ChaChaKnN { nonce: *nonce, key: *key },
}
}
/// Initialize our IETF ChaCha20 stream cipher by invoking
/// `ChaChaKnN::header_cipher` with our parameters `P: Params`.
pub fn header_cipher(&self) -> SphinxResult<HeaderCipher<P>> {
self.chacha.header_cipher::<P>()
}
}
/// Amount of key stream consumed by `hop()` itself
const HOP_EATS : usize = 64;
/// Allocation of cipher ranges for the IETF ChaCha20 inside
/// `HeaderCipher` to various keys and stream cipher roles needed
/// to process a header.
struct StreamChunks {
beta: Range<usize>,
beta_tail: Range<usize>,
surb_log: Range<usize>,
lioness_key: Range<usize>,
blinding: Range<usize>,
delay: Range<usize>,
}
impl StreamChunks {
#[inline]
fn make<P: Params>() -> SphinxResult<StreamChunks> {
let mut offset = HOP_EATS; //
let chunks = {
let mut reserve = |l: usize, block: bool| -> Range<usize> {
if block { offset += 64 - offset % 64; }
let previous = offset;
offset += l;
let r = previous..offset;
debug_assert_eq!(r.len(), l);
r
};
StreamChunks {
beta: reserve(P::BETA_LENGTH as usize,true),
beta_tail: reserve(P::MAX_BETA_TAIL_LENGTH as usize,false),
surb_log: reserve(P::SURB_LOG_LENGTH as usize,true),
lioness_key: reserve(BODY_CIPHER_KEY_SIZE,true),
blinding: reserve(64,true),
delay: reserve(64,true), // Actually 32
}
}; // let chunks
// We check that the maximum key stream length is not exceeded
// here so that calls to both `seek_to` and `xor_read` can
// safely be `.unwrap()`ed, thereby avoiding `-> SphinxResult<_>`
// everywhere.
if offset > (1usize << 38) {
Err( SphinxError::InternalError("Parameters exceed IETF ChaCha20 stream!") )
} else { Ok(chunks) }
}
}
/// Symmetric cryptography for a single Sphinx sub-hop, usually
/// meaning the whole hop.
///
pub struct HeaderCipher<P: Params> {
params: PhantomData<P>,
/// IETF ChaCha20 stream cipher used when processing the header
stream: ChaCha20,
/// Stream cipher ranges determined by `params`
chunks: StreamChunks,
/// Replay code for replay protection
replay_code: ReplayCode,
/// The packet's name for SURB unwinding
packet_name: PacketName,
/// Sphinx poly1305 MAC key
gamma_key: GammaKey,
}
// Declare a `HeaderCipher` initialized after `ClearOnDrop` zeros it so
// that it may be dropped normally. Requires that `Drop::drop` does
// nothing interesting.
impl<P: Params> ::clear_on_drop::clear::InitializableFromZeroed for HeaderCipher<P> {
unsafe fn initialize(_: *mut HeaderCipher<P>) {
}
}
// We implement `Drop::drop` so that `HeaderCipher` cannot be copy.
// `InitializableFromZeroed::initialize` leaves it invalid, so
// `Drop::drop` must not do anything interesting.
impl<P: Params> Drop for HeaderCipher<P> {
fn drop(&mut self) { }
}
impl<P: Params> fmt::Debug for HeaderCipher<P> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "HeaderCipher {{ {:?}, .. }}", self.replay_code.error_packet_id())
}
}
impl<P: Params> HeaderCipher<P> {
// TODO: Can we abstract the lengths checks? Operate on a pair
// `(LayoutRefs,HeaderCipher)` perhaps?
/// Compute the poly1305 MAC `Gamma` using the key found in a Sphinx key exchange.
///
/// Does not verify the lengths of Beta or the SURB.
pub fn create_gamma(&self, beta: &[u8]) -> SphinxResult<Gamma> |
/// Verify the poly1305 MAC `Gamma` given in a Sphinx packet.
///
/// Returns an InvalidMac error if the check fails. Does not
/// verify the lengths of Beta or the SURB.
pub fn verify_gamma(&self, beta: &[u8], gamma_given: &Gamma)
-> SphinxResult<()> {
let gamma_found = self.create_gamma(beta) ?; // InternalError
// TODO: let gamma_found = ClearOnDrop::new(&gamma_found);
if ! ::consistenttime::ct_u8_slice_eq(&gamma_given.0, &gamma_found.0) {
Err( SphinxError::InvalidMac(self.replay_code.error_packet_id()) )
} else { Ok(()) }
}
/// Checks for packet replays using the supplied `ReplayChecker`.
///
/// Replay protection requires that `ReplayChecker::replay_check`
/// returns `Err( SphinxError::Replay(hop.replay_code) )` when a
/// replay occurs.
///
/// You may however use `IgnoreReplay` as the `ReplayChecker` for
/// ratchet sub-hops and for all subhops in packet creation.
pub fn replay_check<RC: ReplayChecker>(&self, replayer: RC) | {
if beta.len() != P::BETA_LENGTH as usize {
return Err( SphinxError::InternalError("Beta has the incorrect length for MAC!") );
}
// According to the current API gamma_out lies in a buffer supplied
// by our caller, so no need for games to zero it here.
let mut gamma_out: Gamma = Default::default();
let mut poly = Poly1305::new(&self.gamma_key.0);
// let mut poly = ClearOnDrop::new(&mut poly);
poly.input(beta);
poly.raw_result(&mut gamma_out.0);
poly.reset();
Ok(gamma_out)
} | identifier_body |
stream.rs | 2],
}
impl ChaChaKnN {
/// Initialize an IETF ChaCha20 stream cipher with our key material
/// and use it to generate the poly1305 key for our MAC gamma, and
/// the packet's name for SURB unwinding.
///
/// Notes: We could improve performance by using the curve25519 point
/// derived in the key exchange directly as the key for an XChaCha20
/// instance, which includes some mixing, and using chacha for the
/// replay code and gamma key. We decided to use SHA3's SHAKE256
/// mode so that we have more and different mixing.
pub fn header_cipher<P: Params>(&self) -> SphinxResult<HeaderCipher<P>> {
let mut chacha = ChaCha20::new_ietf(&self.key, &self.nonce);
let r = &mut [0u8; HOP_EATS];
chacha.xor_read(r).unwrap(); // No KeystreamError::EndReached here.
let (packet_name,replay_code,gamma_key) = array_refs![r,16,16,32];
Ok( HeaderCipher {
params: PhantomData,
chunks: StreamChunks::make::<P>() ?,
packet_name: PacketName(*packet_name),
replay_code: ReplayCode(*replay_code),
gamma_key: GammaKey(*gamma_key),
stream: chacha,
} )
}
}
/// Results of our KDF consisting of the nonce and key for our
/// IETF Chacha20 stream cipher, which produces everything else
/// in the Sphinx header.
#[derive(Clone)]
pub struct SphinxKey<P: Params> {
pub params: PhantomData<P>,
/// IETF Chacha20 stream cipher key and nonce.
pub chacha: ChaChaKnN,
}
/*
impl<P> Clone for SphinxKey<P> where P: Params {
fn clone(&self) -> SphinxKey<P> {
SphinxKey {
params: PhantomData,
chacha: chacha.clone(),
}
}
}
*/
impl<P: Params> SphinxKey<P> {
/// Derive the key material for our IETF Chacha20 stream cipher,
/// incorporating both `P::PROTOCOL_NAME` and our `RoutingName`
/// as seed material.
pub fn new_kdf(ss: &SphinxSecret, rn: &::keys::RoutingName) -> SphinxKey<P> {
use crypto::digest::Digest;
use crypto::sha3::Sha3;
let r = &mut [0u8; 32+16]; // ClearOnDrop
let mut sha = Sha3::shake_256();
sha.input(&ss.0);
sha.input_str( "Sphinx" );
sha.input(&rn.0);
sha.input_str( P::PROTOCOL_NAME );
sha.input(&ss.0);
sha.result(r);
sha.reset();
let (nonce,_,key) = array_refs![r,12,4,32];
SphinxKey {
params: PhantomData,
chacha: ChaChaKnN { nonce: *nonce, key: *key }, | /// Initalize our IETF ChaCha20 stream cipher by invoking
/// `ChaChaKnN::header_cipher` with our paramaters `P: Params`.
pub fn header_cipher(&self) -> SphinxResult<HeaderCipher<P>> {
self.chacha.header_cipher::<P>()
}
}
/// Amount of key stream consumed by `hop()` itself
const HOP_EATS : usize = 64;
/// Allocation of cipher ranges for the IETF ChaCha20 inside
/// `HeaderCipher` to various keys and stream cipher roles needed
/// to process a header.
struct StreamChunks {
beta: Range<usize>,
beta_tail: Range<usize>,
surb_log: Range<usize>,
lioness_key: Range<usize>,
blinding: Range<usize>,
delay: Range<usize>,
}
impl StreamChunks {
#[inline]
fn make<P: Params>() -> SphinxResult<StreamChunks> {
let mut offset = HOP_EATS; //
let chunks = {
let mut reserve = |l: usize, block: bool| -> Range<usize> {
if block { offset += 64 - offset % 64; }
let previous = offset;
offset += l;
let r = previous..offset;
debug_assert_eq!(r.len(), l);
r
};
StreamChunks {
beta: reserve(P::BETA_LENGTH as usize,true),
beta_tail: reserve(P::MAX_BETA_TAIL_LENGTH as usize,false),
surb_log: reserve(P::SURB_LOG_LENGTH as usize,true),
lioness_key: reserve(BODY_CIPHER_KEY_SIZE,true),
blinding: reserve(64,true),
delay: reserve(64,true), // Actually 32
}
}; // let chunks
// We check that the maximum key stream length is not exceeded
// here so that calls to both `seek_to` and `xor_read` can
// safely be `.unwrap()`ed, thereby avoiding `-> SphinxResult<_>`
// everywhere.
if offset > (1usize << 38) {
Err( SphinxError::InternalError("Parameters exceed IETF ChaCha20 stream!") )
} else { Ok(chunks) }
}
}
/// Symmetric cryptography for a single Sphinx sub-hop, usually
/// meaning the whole hop.
///
pub struct HeaderCipher<P: Params> {
params: PhantomData<P>,
/// IETF ChaCha20 stream cipher used when processing the header
stream: ChaCha20,
/// Stream cipher ranges determined by `params`
chunks: StreamChunks,
/// Replay code for replay protection
replay_code: ReplayCode,
/// The packet's name for SURB unwinding
packet_name: PacketName,
/// Sphinx poly1305 MAC key
gamma_key: GammaKey,
}
// Declare a `HeaderCipher` initialized after `ClearOnDrop` zeros it so
// that it may be dropped normally. Requires that `Drop::drop` does
// nothing interesting.
impl<P: Params> ::clear_on_drop::clear::InitializableFromZeroed for HeaderCipher<P> {
unsafe fn initialize(_: *mut HeaderCipher<P>) {
}
}
// We implement `Drop::drop` so that `HeaderCipher` cannot be copy.
// `InitializableFromZeroed::initialize` leaves it invalid, so
// `Drop::drop` must not do anything interesting.
impl<P: Params> Drop for HeaderCipher<P> {
fn drop(&mut self) { }
}
impl<P: Params> fmt::Debug for HeaderCipher<P> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "HeaderCipher {{ {:?}, .. }}", self.replay_code.error_packet_id())
}
}
impl<P: Params> HeaderCipher<P> {
// TODO: Can we abstract the lengths checks? Operate on a pair
// `(LayoutRefs,HeaderCipher)` perhaps?
/// Compute the poly1305 MAC `Gamma` using the key found in a Sphinx key exchange.
///
/// Does not verify the lengths of Beta or the SURB.
pub fn create_gamma(&self, beta: &[u8]) -> SphinxResult<Gamma> {
if beta.len() != P::BETA_LENGTH as usize {
return Err( SphinxError::InternalError("Beta has the incorrect length for MAC!") );
}
// According to the current API gamma_out lies in a buffer supplied
// by our caller, so no need for games to zero it here.
let mut gamma_out: Gamma = Default::default();
let mut poly = Poly1305::new(&self.gamma_key.0);
// let mut poly = ClearOnDrop::new(&mut poly);
poly.input(beta);
poly.raw_result(&mut gamma_out.0);
poly.reset();
Ok(gamma_out)
}
/// Verify the poly1305 MAC `Gamma` given in a Sphinx packet.
///
/// Returns an InvalidMac error if the check fails. Does not
/// verify the lengths of Beta or the SURB.
pub fn verify_gamma(&self, beta: &[u8], gamma_given: &Gamma)
-> SphinxResult<()> {
let gamma_found = self.create_gamma(beta) ?; // InternalError
// TODO: let gamma_found = ClearOnDrop::new(&gamma_found);
if ! ::consistenttime::ct_u8_slice_eq(&gamma_given.0, &gamma_found.0) {
Err( SphinxError::InvalidMac(self.replay_code.error_packet_id()) )
} else { Ok(()) }
}
/// Checks for packet replays using the supplied `ReplayChecker`.
///
/// Replay protection requires that `ReplayChecker::replay_check`
/// returns `Err( SphinxError::Replay(hop.replay_code) )` when a
/// replay occurs.
///
/// You may however use `IgnoreReplay` as the `ReplayChecker` for
/// ratchet sub-hops and for all subhops in packet creation.
pub fn replay_check<RC: ReplayChecker>(&self, replayer: RC) -> | }
}
| random_line_split |
client_darwin.go | ;thread:%x;", data, threadID)
if err := c.send(command); err != nil {
return err
}
return c.receiveAndCheck()
}
// ReadMemory reads the specified memory region.
func (c *Client) ReadMemory(addr uint64, out []byte) error {
command := fmt.Sprintf("m%x,%x", addr, len(out))
if err := c.send(command); err != nil {
return err
}
data, err := c.receive()
if err != nil {
return err
} else if strings.HasPrefix(data, "E") {
return fmt.Errorf("error response: %s", data)
}
byteArray, err := hexToByteArray(data)
if err != nil {
return err
}
if len(byteArray) != len(out) {
log.Debugf("The data size read from the memory differs from the requested size. actual: %d, expected: %d", len(byteArray), len(out))
}
copy(out, byteArray)
return nil
}
// WriteMemory writes the data to the specified memory region.
func (c *Client) WriteMemory(addr uint64, data []byte) error {
dataInHex := ""
for _, b := range data {
dataInHex += fmt.Sprintf("%02x", b)
}
command := fmt.Sprintf("M%x,%x:%s", addr, len(data), dataInHex)
if err := c.send(command); err != nil {
return err
}
return c.receiveAndCheck()
}
// ReadTLS reads the offset from the beginning of the TLS block.
func (c *Client) ReadTLS(threadID int, offset int32) (uint64, error) {
if err := c.updateReadTLSFunction(uint32(offset)); err != nil {
return 0, err
}
originalRegs, err := c.ReadRegisters(threadID)
if err != nil {
return 0, err
}
defer func() { err = c.WriteRegisters(threadID, originalRegs) }()
modifiedRegs := originalRegs
modifiedRegs.Rip = c.readTLSFuncAddr
if err = c.WriteRegisters(threadID, modifiedRegs); err != nil {
return 0, err
}
if _, err := c.StepAndWait(threadID); err != nil {
return 0, err
}
modifiedRegs, err = c.ReadRegisters(threadID)
return modifiedRegs.Rcx, err
}
func (c *Client) updateReadTLSFunction(offset uint32) error {
if c.currentTLSOffset == offset {
return nil
}
readTLSFunction := c.buildReadTLSFunction(offset)
if err := c.WriteMemory(c.readTLSFuncAddr, readTLSFunction); err != nil {
return err
}
c.currentTLSOffset = offset
return nil
}
func (c *Client) buildReadTLSFunction(offset uint32) []byte {
offsetBytes := make([]byte, 4)
binary.LittleEndian.PutUint32(offsetBytes, offset)
readTLSFunction := []byte{0x65, 0x48, 0x8b, 0x0c, 0x25} // mac OS X uses gs_base
return append(readTLSFunction, offsetBytes...)
}
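// For reference, the five opcode bytes above decode to `mov rcx, gs:[offset]`:
// 0x65 is the GS segment override, 0x48 the REX.W prefix, 0x8b MOV r64,r/m64,
// and 0x0c 0x25 select RCX with a 32-bit absolute displacement. ReadTLS points
// RIP at this stub, single-steps it, and collects the TLS value from RCX.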
// ContinueAndWait resumes processes and waits until an event happens.
// The exited event is reported when the main process exits (and not when its threads exit).
func (c *Client) ContinueAndWait() (Event, error) {
return c.continueAndWait(c.pendingSignal)
}
// StepAndWait executes one instruction of the specified thread and waits until an event happens.
// The returned event may not be the trapped event.
// If an unspecified thread is stopped, UnspecifiedThreadError is returned.
func (c *Client) StepAndWait(threadID int) (Event, error) {
var command string
if c.pendingSignal == 0 {
command = fmt.Sprintf("vCont;s:%x", threadID)
} else {
command = fmt.Sprintf("vCont;S%02x:%x", c.pendingSignal, threadID)
}
if err := c.send(command); err != nil {
return Event{}, fmt.Errorf("send error: %v", err)
}
event, err := c.wait()
if err != nil {
return Event{}, err
} else if event.Type != EventTypeTrapped {
return Event{}, fmt.Errorf("unexpected event: %#v", event)
} else if threadIDs := event.Data.([]int); len(threadIDs) != 1 || threadIDs[0] != threadID {
return Event{}, UnspecifiedThreadError{ThreadIDs: threadIDs}
}
return event, err
}
func (c *Client) continueAndWait(signalNumber int) (Event, error) {
var command string
if signalNumber == 0 {
command = "vCont;c"
} else {
// Though the signal number is specified, it seems the debugserver does not pass signals like SIGTERM and SIGINT to the debuggee.
// QPassSignals can change this setting, but debugserver (900.0.64) doesn't support the query.
command = fmt.Sprintf("vCont;C%02x", signalNumber)
}
if err := c.send(command); err != nil {
return Event{}, fmt.Errorf("send error: %v", err)
}
return c.wait()
}
func (c *Client) wait() (Event, error) {
var data string
var err error
for {
data, err = c.receiveWithTimeout(10 * time.Second)
if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
// debugserver sometimes does not send a reply packet even when a thread is stopped.
data, err = c.checkStopReply()
if err != nil {
return Event{}, fmt.Errorf("failed to query stop reply: %v", err)
} else if data != "" {
log.Debugf("debugserver did not reply packets though there is the stopped thread.")
break
}
} else if err != nil {
return Event{}, fmt.Errorf("receive error: %v", err)
}
if data != "" {
break
}
}
stopReplies := c.buildStopReplies(data)
// process O packet beforehand in order to simplify further processing.
stopReplies, err = c.processOutputPacket(stopReplies)
if err != nil {
return Event{}, fmt.Errorf("failed to process output packet: %v", err)
}
if len(stopReplies) == 0 {
return c.wait()
}
return c.handleStopReply(stopReplies)
}
func (c *Client) checkStopReply() (string, error) {
threadIDs, err := c.ThreadIDs()
if err != nil {
return "", err
}
for _, threadID := range threadIDs {
data, err := c.qThreadStopInfo(threadID)
if err != nil {
return "", err
}
if !strings.HasPrefix(data, "T00") {
return data, nil
}
}
return "", nil
}
func (c *Client) buildStopReplies(data string) []string {
replies := strings.Split(data, "$")
for i, reply := range replies {
if reply[len(reply)-3] == '#' {
replies[i] = reply[0 : len(reply)-3]
}
}
return replies
}
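// Illustrative only: receive() has already stripped the outer "$...#xx"
// framing, so a coalesced reply such as "T05thread:1a03;#c7$W00" is split
// into []string{"T05thread:1a03;", "W00"}, the loop above trimming the
// embedded "#c7" checksum left over from the first packet.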
func (c *Client) processOutputPacket(stopReplies []string) ([]string, error) {
var unprocessedReplies []string
for _, stopReply := range stopReplies {
if stopReply[0] != 'O' {
unprocessedReplies = append(unprocessedReplies, stopReply)
continue
}
out, err := hexToByteArray(stopReply[1:])
if err != nil {
return nil, err
}
c.outputWriter.Write(out)
}
return unprocessedReplies, nil
}
func (c *Client) handleStopReply(stopReplies []string) (event Event, err error) {
switch stopReplies[0][0] {
case 'T':
if len(stopReplies) > 1 {
log.Debugf("received 2 or more stop replies at once. Consider only first one. data: %v", stopReplies)
}
event, err = c.handleTPacket(stopReplies[0])
case 'W':
// Ignore remaining packets because the process ends.
event, err = c.handleWPacket(stopReplies[0])
case 'X':
// Ignore remaining packets because the process ends.
event, err = c.handleXPacket(stopReplies[0])
default:
err = fmt.Errorf("unknown packet type: %s", stopReplies[0])
}
if err != nil {
log.Debugf("failed to handle the packet (data: %v): %v", stopReplies[0], err)
return Event{}, err
}
if IsExitEvent(event.Type) {
// the connection may be closed already.
_ = c.close()
}
return event, nil
}
func (c *Client) handleTPacket(packet string) (Event, error) {
signalNumber, err := hexToUint64(packet[1:3], false)
if err != nil {
return Event{}, err
}
if syscall.Signal(signalNumber) == excBadAccess | {
log.Debugf("bad memory access: %s", packet)
return Event{}, fmt.Errorf("bad memory access")
} | conditional_block |
|
client_darwin.go | nil {
return Event{}, fmt.Errorf("send error: %v", err)
}
return c.wait()
}
func (c *Client) wait() (Event, error) {
var data string
var err error
for {
data, err = c.receiveWithTimeout(10 * time.Second)
if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
// debugserver sometimes does not send a reply packet even when a thread is stopped.
data, err = c.checkStopReply()
if err != nil {
return Event{}, fmt.Errorf("failed to query stop reply: %v", err)
} else if data != "" {
log.Debugf("debugserver did not reply packets though there is the stopped thread.")
break
}
} else if err != nil {
return Event{}, fmt.Errorf("receive error: %v", err)
}
if data != "" {
break
}
}
stopReplies := c.buildStopReplies(data)
// process O packet beforehand in order to simplify further processing.
stopReplies, err = c.processOutputPacket(stopReplies)
if err != nil {
return Event{}, fmt.Errorf("failed to process output packet: %v", err)
}
if len(stopReplies) == 0 {
return c.wait()
}
return c.handleStopReply(stopReplies)
}
func (c *Client) checkStopReply() (string, error) {
threadIDs, err := c.ThreadIDs()
if err != nil {
return "", err
}
for _, threadID := range threadIDs {
data, err := c.qThreadStopInfo(threadID)
if err != nil {
return "", err
}
if !strings.HasPrefix(data, "T00") {
return data, nil
}
}
return "", nil
}
func (c *Client) buildStopReplies(data string) []string {
replies := strings.Split(data, "$")
for i, reply := range replies {
if reply[len(reply)-3] == '#' {
replies[i] = reply[0 : len(reply)-3]
}
}
return replies
}
func (c *Client) processOutputPacket(stopReplies []string) ([]string, error) {
var unprocessedReplies []string
for _, stopReply := range stopReplies {
if stopReply[0] != 'O' {
unprocessedReplies = append(unprocessedReplies, stopReply)
continue
}
out, err := hexToByteArray(stopReply[1:])
if err != nil {
return nil, err
}
c.outputWriter.Write(out)
}
return unprocessedReplies, nil
}
func (c *Client) handleStopReply(stopReplies []string) (event Event, err error) {
switch stopReplies[0][0] {
case 'T':
if len(stopReplies) > 1 {
log.Debugf("received 2 or more stop replies at once. Consider only first one. data: %v", stopReplies)
}
event, err = c.handleTPacket(stopReplies[0])
case 'W':
// Ignore remaining packets because the process ends.
event, err = c.handleWPacket(stopReplies[0])
case 'X':
// Ignore remaining packets because the process ends.
event, err = c.handleXPacket(stopReplies[0])
default:
err = fmt.Errorf("unknown packet type: %s", stopReplies[0])
}
if err != nil {
log.Debugf("failed to handle the packet (data: %v): %v", stopReplies[0], err)
return Event{}, err
}
if IsExitEvent(event.Type) {
// the connection may be closed already.
_ = c.close()
}
return event, nil
}
func (c *Client) handleTPacket(packet string) (Event, error) {
signalNumber, err := hexToUint64(packet[1:3], false)
if err != nil {
return Event{}, err
}
if syscall.Signal(signalNumber) == excBadAccess {
log.Debugf("bad memory access: %s", packet)
return Event{}, fmt.Errorf("bad memory access")
}
var threadIDs []int
for _, kvInStr := range strings.Split(packet[3:len(packet)-1], ";") {
kvArr := strings.Split(kvInStr, ":")
key, value := kvArr[0], kvArr[1]
if key == "threads" {
for _, threadID := range strings.Split(value, ",") {
threadIDInNum, err := hexToUint64(threadID, false)
if err != nil {
return Event{}, err
}
threadIDs = append(threadIDs, int(threadIDInNum))
}
}
}
trappedThreadIDs, err := c.selectTrappedThreads(threadIDs)
if err != nil {
return Event{}, err
} else if len(trappedThreadIDs) == 0 {
return c.continueAndWait(int(signalNumber))
}
if syscall.Signal(signalNumber) != unix.SIGTRAP {
c.pendingSignal = int(signalNumber)
} else {
c.pendingSignal = 0
}
return Event{Type: EventTypeTrapped, Data: trappedThreadIDs}, nil
}
func (c *Client) selectTrappedThreads(threadIDs []int) ([]int, error) {
var trappedThreads []int
for _, threadID := range threadIDs {
data, err := c.qThreadStopInfo(threadID)
if err != nil {
return nil, err
}
signalNumber, err := hexToUint64(data[1:3], false)
if err != nil {
return nil, err
}
if syscall.Signal(signalNumber) == unix.SIGTRAP {
trappedThreads = append(trappedThreads, threadID)
}
}
return trappedThreads, nil
}
func (c *Client) qThreadStopInfo(threadID int) (string, error) {
command := fmt.Sprintf("qThreadStopInfo%02x", threadID)
if err := c.send(command); err != nil {
return "", err
}
data, err := c.receive()
if err != nil {
return "", err
} else if strings.HasPrefix(data, "E") {
return data, fmt.Errorf("error response: %s", data)
}
return data, nil
}
func (c *Client) handleWPacket(packet string) (Event, error) {
exitStatus, err := hexToUint64(packet[1:3], false)
return Event{Type: EventTypeExited, Data: int(exitStatus)}, err
}
func (c *Client) handleXPacket(packet string) (Event, error) {
signalNumber, err := hexToUint64(packet[1:3], false)
// TODO: signalNumber here seems to be always 0. The number in the description looks correct, so it may be better to use that instead.
return Event{Type: EventTypeTerminated, Data: int(signalNumber)}, err
}
func (c *Client) send(command string) error {
packet := fmt.Sprintf("$%s#00", command)
if !c.noAckMode {
packet = fmt.Sprintf("$%s#%02x", command, calcChecksum([]byte(command)))
}
if n, err := c.conn.Write([]byte(packet)); err != nil {
return err
} else if n != len(packet) {
return fmt.Errorf("only part of the buffer is sent: %d / %d", n, len(packet))
}
if !c.noAckMode {
return c.receiveAck()
}
return nil
}
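// The "$<payload>#<checksum>" framing above follows the GDB remote serial
// protocol: the checksum is the modulo-256 sum of the payload bytes printed
// as two hex digits, which is what calcChecksum (defined elsewhere in this
// package) is assumed to compute. For example, the bytes of "vCont;c" sum
// to 680, so the packet on the wire would be "$vCont;c#a8" (680 % 256 = 0xa8).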
func (c *Client) receiveAndCheck() error {
if data, err := c.receive(); err != nil {
return err
} else if data != "OK" {
return fmt.Errorf("the error response is returned: %s", data)
}
return nil
}
func (c *Client) receive() (string, error) {
var rawPacket []byte
for {
n, err := c.conn.Read(c.buffer)
if err != nil {
return "", err
}
rawPacket = append(rawPacket, c.buffer[0:n]...)
if len(rawPacket) < 4 {
// there should be at least 4 bytes
continue
} else if rawPacket[len(rawPacket)-3] == '#' {
// received at least 1 packet.
// TODO: handle multiple packets case
break
}
}
packet := string(rawPacket)
data := string(rawPacket[1 : len(rawPacket)-3])
if !c.noAckMode {
if err := verifyPacket(packet); err != nil {
return "", err
}
return data, c.sendAck()
}
return data, nil
}
func (c *Client) receiveWithTimeout(timeout time.Duration) (string, error) {
c.conn.SetReadDeadline(time.Now().Add(timeout))
defer c.conn.SetReadDeadline(time.Time{})
return c.receive()
}
func (c *Client) sendAck() error {
_, err := c.conn.Write([]byte("+"))
return err
}
func (c *Client) receiveAck() error {
if _, err := c.conn.Read(c.buffer[0:1]); err != nil {
return err | } else if c.buffer[0] != '+' {
return errors.New("failed to receive ack")
} | random_line_split |
|
client_darwin.go | x;", threadID)
if err := c.send(command); err != nil {
return "", err
}
data, err := c.receive()
if err != nil {
return "", err
} else if strings.HasPrefix(data, "E") {
return data, fmt.Errorf("error response: %s", data)
}
return data, nil
}
func (c *Client) parseRegisterData(data string) (Registers, error) {
var regs Registers
for _, metadata := range c.registerMetadataList {
rawValue := data[metadata.offset*2 : (metadata.offset+metadata.size)*2]
var err error
switch metadata.name {
case "rip":
regs.Rip, err = hexToUint64(rawValue, true)
case "rsp":
regs.Rsp, err = hexToUint64(rawValue, true)
case "rcx":
regs.Rcx, err = hexToUint64(rawValue, true)
}
if err != nil {
return Registers{}, err
}
}
return regs, nil
}
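// Illustrative only: the "g" reply is one flat hex string and each
// registerMetadata entry addresses a slice of it. With an entry such as
// {name: "rip", offset: 16, size: 8} (values invented for this example),
// rawValue is data[32:48], i.e. 16 hex digits that hexToUint64 decodes
// (the `true` flag is taken to mean the value is stored little-endian).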
// WriteRegisters updates the registers' value.
func (c *Client) WriteRegisters(threadID int, regs Registers) error {
data, err := c.readRegisters(threadID)
if err != nil {
return err
}
// The 'P' command is not used due to the bug explained here: https://github.com/llvm-mirror/lldb/commit/d8d7a40ca5377aa777e3840f3e9b6a63c6b09445
for _, metadata := range c.registerMetadataList {
prefix := data[0 : metadata.offset*2]
suffix := data[(metadata.offset+metadata.size)*2:]
var err error
switch metadata.name {
case "rip":
data = fmt.Sprintf("%s%s%s", prefix, uint64ToHex(regs.Rip, true), suffix)
case "rsp":
data = fmt.Sprintf("%s%s%s", prefix, uint64ToHex(regs.Rsp, true), suffix)
case "rcx":
data = fmt.Sprintf("%s%s%s", prefix, uint64ToHex(regs.Rcx, true), suffix)
}
if err != nil {
return err
}
}
command := fmt.Sprintf("G%s;thread:%x;", data, threadID)
if err := c.send(command); err != nil {
return err
}
return c.receiveAndCheck()
}
// ReadMemory reads the specified memory region.
func (c *Client) ReadMemory(addr uint64, out []byte) error {
command := fmt.Sprintf("m%x,%x", addr, len(out))
if err := c.send(command); err != nil {
return err
}
data, err := c.receive()
if err != nil {
return err
} else if strings.HasPrefix(data, "E") {
return fmt.Errorf("error response: %s", data)
}
byteArray, err := hexToByteArray(data)
if err != nil {
return err
}
if len(byteArray) != len(out) {
log.Debugf("The data size read from the memory differs from the requested size. actual: %d, expected: %d", len(byteArray), len(out))
}
copy(out, byteArray)
return nil
}
// WriteMemory writes the data to the specified memory region.
func (c *Client) WriteMemory(addr uint64, data []byte) error {
dataInHex := ""
for _, b := range data {
dataInHex += fmt.Sprintf("%02x", b)
}
command := fmt.Sprintf("M%x,%x:%s", addr, len(data), dataInHex)
if err := c.send(command); err != nil {
return err
}
return c.receiveAndCheck()
}
// ReadTLS reads the offset from the beginning of the TLS block.
func (c *Client) ReadTLS(threadID int, offset int32) (uint64, error) {
if err := c.updateReadTLSFunction(uint32(offset)); err != nil {
return 0, err
}
originalRegs, err := c.ReadRegisters(threadID)
if err != nil {
return 0, err
}
defer func() { err = c.WriteRegisters(threadID, originalRegs) }()
modifiedRegs := originalRegs
modifiedRegs.Rip = c.readTLSFuncAddr
if err = c.WriteRegisters(threadID, modifiedRegs); err != nil {
return 0, err
}
if _, err := c.StepAndWait(threadID); err != nil {
return 0, err
}
modifiedRegs, err = c.ReadRegisters(threadID)
return modifiedRegs.Rcx, err
}
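// The sequence above is the usual code-injection trick for reading
// thread-local storage without kernel support: save the registers, point
// RIP at the injected `mov rcx, gs:[offset]` stub, single-step once,
// harvest the result from RCX, and let the deferred WriteRegisters call
// restore the original register state.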
func (c *Client) updateReadTLSFunction(offset uint32) error {
if c.currentTLSOffset == offset {
return nil
}
readTLSFunction := c.buildReadTLSFunction(offset)
if err := c.WriteMemory(c.readTLSFuncAddr, readTLSFunction); err != nil {
return err
}
c.currentTLSOffset = offset
return nil
}
func (c *Client) buildReadTLSFunction(offset uint32) []byte {
offsetBytes := make([]byte, 4)
binary.LittleEndian.PutUint32(offsetBytes, offset)
readTLSFunction := []byte{0x65, 0x48, 0x8b, 0x0c, 0x25} // mac OS X uses gs_base
return append(readTLSFunction, offsetBytes...)
}
// ContinueAndWait resumes processes and waits until an event happens.
// The exited event is reported when the main process exits (and not when its threads exit).
func (c *Client) ContinueAndWait() (Event, error) {
return c.continueAndWait(c.pendingSignal)
}
// StepAndWait executes one instruction of the specified thread and waits until an event happens.
// The returned event may not be the trapped event.
// If an unspecified thread is stopped, UnspecifiedThreadError is returned.
func (c *Client) StepAndWait(threadID int) (Event, error) {
var command string
if c.pendingSignal == 0 {
command = fmt.Sprintf("vCont;s:%x", threadID)
} else {
command = fmt.Sprintf("vCont;S%02x:%x", c.pendingSignal, threadID)
}
if err := c.send(command); err != nil {
return Event{}, fmt.Errorf("send error: %v", err)
}
event, err := c.wait()
if err != nil {
return Event{}, err
} else if event.Type != EventTypeTrapped {
return Event{}, fmt.Errorf("unexpected event: %#v", event)
} else if threadIDs := event.Data.([]int); len(threadIDs) != 1 || threadIDs[0] != threadID {
return Event{}, UnspecifiedThreadError{ThreadIDs: threadIDs}
}
return event, err
}
func (c *Client) continueAndWait(signalNumber int) (Event, error) {
var command string
if signalNumber == 0 {
command = "vCont;c"
} else {
// Though the signal number is specified, it seems the debugserver does not pass signals like SIGTERM and SIGINT to the debuggee.
// QPassSignals can change this setting, but debugserver (900.0.64) doesn't support the query.
command = fmt.Sprintf("vCont;C%02x", signalNumber)
}
if err := c.send(command); err != nil {
return Event{}, fmt.Errorf("send error: %v", err)
}
return c.wait()
}
func (c *Client) wait() (Event, error) {
var data string
var err error
for {
data, err = c.receiveWithTimeout(10 * time.Second)
if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
// debugserver sometimes does not send a reply packet even when a thread is stopped.
data, err = c.checkStopReply()
if err != nil {
return Event{}, fmt.Errorf("failed to query stop reply: %v", err)
} else if data != "" {
log.Debugf("debugserver did not reply packets though there is the stopped thread.")
break
}
} else if err != nil {
return Event{}, fmt.Errorf("receive error: %v", err)
}
if data != "" {
break
}
}
stopReplies := c.buildStopReplies(data)
// process O packet beforehand in order to simplify further processing.
stopReplies, err = c.processOutputPacket(stopReplies)
if err != nil {
return Event{}, fmt.Errorf("failed to process output packet: %v", err)
}
if len(stopReplies) == 0 {
return c.wait()
}
return c.handleStopReply(stopReplies)
}
func (c *Client) checkStopReply() (string, error) {
threadIDs, err := c.ThreadIDs()
if err != nil {
return "", err
}
for _, threadID := range threadIDs {
data, err := c.qThreadStopInfo(threadID)
if err != nil {
return "", err
}
if !strings.HasPrefix(data, "T00") {
return data, nil
}
}
return "", nil
}
func (c *Client) | buildStopReplies | identifier_name |
|
client_darwin.go | .StepAndWait(threadID); err != nil {
return 0, err
}
modifiedRegs, err = c.ReadRegisters(threadID)
return modifiedRegs.Rcx, err
}
func (c *Client) updateReadTLSFunction(offset uint32) error {
if c.currentTLSOffset == offset {
return nil
}
readTLSFunction := c.buildReadTLSFunction(offset)
if err := c.WriteMemory(c.readTLSFuncAddr, readTLSFunction); err != nil {
return err
}
c.currentTLSOffset = offset
return nil
}
func (c *Client) buildReadTLSFunction(offset uint32) []byte {
offsetBytes := make([]byte, 4)
binary.LittleEndian.PutUint32(offsetBytes, offset)
readTLSFunction := []byte{0x65, 0x48, 0x8b, 0x0c, 0x25} // mac OS X uses gs_base
return append(readTLSFunction, offsetBytes...)
}
// ContinueAndWait resumes processes and waits until an event happens.
// The exited event is reported when the main process exits (and not when its threads exit).
func (c *Client) ContinueAndWait() (Event, error) {
return c.continueAndWait(c.pendingSignal)
}
// StepAndWait executes one instruction of the specified thread and waits until an event happens.
// The returned event may not be the trapped event.
// If an unspecified thread is stopped, UnspecifiedThreadError is returned.
func (c *Client) StepAndWait(threadID int) (Event, error) {
var command string
if c.pendingSignal == 0 {
command = fmt.Sprintf("vCont;s:%x", threadID)
} else {
command = fmt.Sprintf("vCont;S%02x:%x", c.pendingSignal, threadID)
}
if err := c.send(command); err != nil {
return Event{}, fmt.Errorf("send error: %v", err)
}
event, err := c.wait()
if err != nil {
return Event{}, err
} else if event.Type != EventTypeTrapped {
return Event{}, fmt.Errorf("unexpected event: %#v", event)
} else if threadIDs := event.Data.([]int); len(threadIDs) != 1 || threadIDs[0] != threadID {
return Event{}, UnspecifiedThreadError{ThreadIDs: threadIDs}
}
return event, err
}
func (c *Client) continueAndWait(signalNumber int) (Event, error) {
var command string
if signalNumber == 0 {
command = "vCont;c"
} else {
// Though the signal number is specified, it seems the debugserver does not pass signals like SIGTERM and SIGINT to the debuggee.
// QPassSignals can change this setting, but debugserver (900.0.64) doesn't support the query.
command = fmt.Sprintf("vCont;C%02x", signalNumber)
}
if err := c.send(command); err != nil {
return Event{}, fmt.Errorf("send error: %v", err)
}
return c.wait()
}
func (c *Client) wait() (Event, error) {
var data string
var err error
for {
data, err = c.receiveWithTimeout(10 * time.Second)
if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
// debugserver sometimes does not send a reply packet even when a thread is stopped.
data, err = c.checkStopReply()
if err != nil {
return Event{}, fmt.Errorf("failed to query stop reply: %v", err)
} else if data != "" {
log.Debugf("debugserver did not reply packets though there is the stopped thread.")
break
}
} else if err != nil {
return Event{}, fmt.Errorf("receive error: %v", err)
}
if data != "" {
break
}
}
stopReplies := c.buildStopReplies(data)
// process O packet beforehand in order to simplify further processing.
stopReplies, err = c.processOutputPacket(stopReplies)
if err != nil {
return Event{}, fmt.Errorf("failed to process output packet: %v", err)
}
if len(stopReplies) == 0 {
return c.wait()
}
return c.handleStopReply(stopReplies)
}
func (c *Client) checkStopReply() (string, error) {
threadIDs, err := c.ThreadIDs()
if err != nil {
return "", err
}
for _, threadID := range threadIDs {
data, err := c.qThreadStopInfo(threadID)
if err != nil {
return "", err
}
if !strings.HasPrefix(data, "T00") {
return data, nil
}
}
return "", nil
}
func (c *Client) buildStopReplies(data string) []string {
replies := strings.Split(data, "$")
for i, reply := range replies {
if reply[len(reply)-3] == '#' {
replies[i] = reply[0 : len(reply)-3]
}
}
return replies
}
func (c *Client) processOutputPacket(stopReplies []string) ([]string, error) {
var unprocessedReplies []string
for _, stopReply := range stopReplies {
if stopReply[0] != 'O' {
unprocessedReplies = append(unprocessedReplies, stopReply)
continue
}
out, err := hexToByteArray(stopReply[1:])
if err != nil {
return nil, err
}
c.outputWriter.Write(out)
}
return unprocessedReplies, nil
}
func (c *Client) handleStopReply(stopReplies []string) (event Event, err error) {
switch stopReplies[0][0] {
case 'T':
if len(stopReplies) > 1 {
log.Debugf("received 2 or more stop replies at once. Consider only first one. data: %v", stopReplies)
}
event, err = c.handleTPacket(stopReplies[0])
case 'W':
// Ignore remaining packets because the process ends.
event, err = c.handleWPacket(stopReplies[0])
case 'X':
// Ignore remaining packets because the process ends.
event, err = c.handleXPacket(stopReplies[0])
default:
err = fmt.Errorf("unknown packet type: %s", stopReplies[0])
}
if err != nil {
log.Debugf("failed to handle the packet (data: %v): %v", stopReplies[0], err)
return Event{}, err
}
if IsExitEvent(event.Type) {
// the connection may be closed already.
_ = c.close()
}
return event, nil
}
func (c *Client) handleTPacket(packet string) (Event, error) {
signalNumber, err := hexToUint64(packet[1:3], false)
if err != nil {
return Event{}, err
}
if syscall.Signal(signalNumber) == excBadAccess {
log.Debugf("bad memory access: %s", packet)
return Event{}, fmt.Errorf("bad memory access")
}
var threadIDs []int
for _, kvInStr := range strings.Split(packet[3:len(packet)-1], ";") {
kvArr := strings.Split(kvInStr, ":")
key, value := kvArr[0], kvArr[1]
if key == "threads" {
for _, threadID := range strings.Split(value, ",") {
threadIDInNum, err := hexToUint64(threadID, false)
if err != nil {
return Event{}, err
}
threadIDs = append(threadIDs, int(threadIDInNum))
}
}
}
trappedThreadIDs, err := c.selectTrappedThreads(threadIDs)
if err != nil {
return Event{}, err
} else if len(trappedThreadIDs) == 0 {
return c.continueAndWait(int(signalNumber))
}
if syscall.Signal(signalNumber) != unix.SIGTRAP {
c.pendingSignal = int(signalNumber)
} else {
c.pendingSignal = 0
}
return Event{Type: EventTypeTrapped, Data: trappedThreadIDs}, nil
}
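// For context, a T packet such as "T05thread:1a03;threads:1a03,1b03;"
// (per the GDB remote protocol) carries the stopping signal in its first
// two hex digits and the stopped thread list in the "threads" pair, which
// is exactly what the parsing above extracts before filtering for SIGTRAP.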
func (c *Client) selectTrappedThreads(threadIDs []int) ([]int, error) {
var trappedThreads []int
for _, threadID := range threadIDs {
data, err := c.qThreadStopInfo(threadID)
if err != nil {
return nil, err
}
signalNumber, err := hexToUint64(data[1:3], false)
if err != nil {
return nil, err
}
if syscall.Signal(signalNumber) == unix.SIGTRAP {
trappedThreads = append(trappedThreads, threadID)
}
}
return trappedThreads, nil
}
func (c *Client) qThreadStopInfo(threadID int) (string, error) | {
command := fmt.Sprintf("qThreadStopInfo%02x", threadID)
if err := c.send(command); err != nil {
return "", err
}
data, err := c.receive()
if err != nil {
return "", err
} else if strings.HasPrefix(data, "E") {
return data, fmt.Errorf("error response: %s", data)
}
return data, nil
} | identifier_body |
|
router.go | (v bool) {
if v != r.routerMutable {
r.routerMutable = v
r.router.Mutable(v)
}
}
func (r *Router) buildMiddlewares(m Middlewares) Middlewares {
m2 := Middlewares{}
m2.Before = append(m2.Before, r.middlewares.Before...)
m2.Before = append(m2.Before, m.Before...)
m2.After = append(m2.After, m.After...)
m2.After = append(m2.After, r.middlewares.After...)
m2.Skip = append(m2.Skip, m.Skip...)
m2.Skip = append(m2.Skip, r.middlewares.Skip...)
m2.Final = append(m2.Final, m.Final...)
m2.Final = append(m2.Final, r.middlewares.Final...)
if r.parent != nil {
return r.parent.buildMiddlewares(m2)
}
m2.Before = appendMiddlewares(m2.Before[:0], m2.Before, m2.Skip...)
m2.After = appendMiddlewares(m2.After[:0], m2.After, m2.Skip...)
return m2
}
func (r *Router) getGroupFullPath(path string) string {
if r.parent != nil |
return path
}
func (r *Router) handler(fn View, middle Middlewares) fasthttp.RequestHandler {
middle = r.buildMiddlewares(middle)
chain := make([]Middleware, 0)
chain = append(chain, middle.Before...)
chain = append(chain, func(ctx *RequestCtx) error {
if !ctx.skipView {
if err := fn(ctx); err != nil {
return err
}
}
return ctx.Next()
})
chain = append(chain, middle.After...)
chainLen := len(chain)
return func(ctx *fasthttp.RequestCtx) {
actx := AcquireRequestCtx(ctx)
for i := 0; i < chainLen; i++ {
if err := chain[i](actx); err != nil {
r.handleMiddlewareError(actx, err)
break
} else if !actx.next {
break
}
actx.next = false
}
for _, final := range middle.Final {
final(actx)
}
ReleaseRequestCtx(actx)
}
}
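// Note on the chain above: a middleware must call ctx.Next() (which sets
// actx.next) for execution to reach the following middleware or the view;
// returning without it short-circuits the rest of the Before/After chain,
// while the Final middlewares always run before the context is released.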
func (r *Router) handleMiddlewareError(ctx *RequestCtx, err error) {
statusCode := ctx.Response.Header.StatusCode()
if statusCode == fasthttp.StatusOK {
statusCode = fasthttp.StatusInternalServerError
}
r.errorView(ctx, err, statusCode)
}
func (r *Router) handlePath(p *Path) {
isOPTIONS := p.method == fasthttp.MethodOptions
switch {
case p.registered:
r.mutable(true)
case isOPTIONS:
mutable := !gstrings.Include(r.customOPTIONS, p.fullURL)
r.mutable(mutable)
case r.routerMutable:
r.mutable(false)
}
view := p.view
if isOPTIONS {
view = buildOptionsView(p.fullURL, view, r.ListPaths())
r.customOPTIONS = gstrings.UniqueAppend(r.customOPTIONS, p.fullURL)
}
handler := r.handler(view, p.middlewares)
if p.withTimeout {
handler = fasthttp.TimeoutWithCodeHandler(handler, p.timeout, p.timeoutMsg, p.timeoutCode)
}
handleFunc := r.router.Handle
if r.group != nil {
handleFunc = r.group.Handle
}
handleFunc(p.method, p.url, handler)
if r.handleOPTIONS && !p.registered && !isOPTIONS {
view = buildOptionsView(p.fullURL, emptyView, r.ListPaths())
handler = r.handler(view, p.middlewares)
r.mutable(true)
handleFunc(fasthttp.MethodOptions, p.url, handler)
}
}
// NewGroupPath returns a new router to group paths.
func (r *Router) NewGroupPath(path string) *Router {
groupFunc := r.router.Group
if r.group != nil {
groupFunc = r.group.Group
}
return &Router{
parent: r,
router: r.router,
routerMutable: r.routerMutable,
errorView: r.errorView,
prefix: path,
group: groupFunc(path),
handleOPTIONS: r.handleOPTIONS,
}
}
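// A minimal usage sketch (the server value `s` and the handler body are
// illustrative, not taken from this file):
//
//	v1 := s.NewGroupPath("/v1")
//	v1.GET("/status", func(ctx *RequestCtx) error {
//		return ctx.TextResponse("ok")
//	})
//
// The child router shares the parent's underlying fasthttp router but
// prefixes every registered path with "/v1" and inherits the error view
// and OPTIONS handling.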
// ListPaths returns all registered routes grouped by method.
func (r *Router) ListPaths() map[string][]string {
return r.router.List()
}
// Middlewares defines the middlewares (before, after, skip and final) in the order in which you want to execute them
// for the view or group
//
// WARNING: The previous middlewares configuration could be overridden.
func (r *Router) Middlewares(middlewares Middlewares) *Router {
r.middlewares = middlewares
return r
}
// UseBefore registers the middlewares in the order in which you want to execute them
// before the execution of the view or group.
func (r *Router) UseBefore(fns ...Middleware) *Router {
r.middlewares.Before = append(r.middlewares.Before, fns...)
return r
}
// UseAfter registers the middlewares in the order in which you want to execute them
// after the execution of the view or group.
func (r *Router) UseAfter(fns ...Middleware) *Router {
r.middlewares.After = append(r.middlewares.After, fns...)
return r
}
// UseFinal registers the given middlewares to be executed in the order in which they are added,
// after the view or group has been executed. These middlewares will always be executed,
// even if a previous middleware or the view/group returned a response.
func (r *Router) UseFinal(fns ...FinalMiddleware) *Router {
r.middlewares.Final = append(r.middlewares.Final, fns...)
return r
}
// SkipMiddlewares registers the middlewares that you want to skip when executing the view or group.
func (r *Router) SkipMiddlewares(fns ...Middleware) *Router {
r.middlewares.Skip = append(r.middlewares.Skip, fns...)
return r
}
// GET shortcut for router.Path("GET", url, viewFn).
func (r *Router) GET(url string, viewFn View) *Path {
return r.Path(fasthttp.MethodGet, url, viewFn)
}
// HEAD shortcut for router.Path("HEAD", url, viewFn).
func (r *Router) HEAD(url string, viewFn View) *Path {
return r.Path(fasthttp.MethodHead, url, viewFn)
}
// OPTIONS shortcut for router.Path("OPTIONS", url, viewFn).
func (r *Router) OPTIONS(url string, viewFn View) *Path {
return r.Path(fasthttp.MethodOptions, url, viewFn)
}
// POST shortcut for router.Path("POST", url, viewFn).
func (r *Router) POST(url string, viewFn View) *Path {
return r.Path(fasthttp.MethodPost, url, viewFn)
}
// PUT shortcut for router.Path("PUT", url, viewFn).
func (r *Router) PUT(url string, viewFn View) *Path {
return r.Path(fasthttp.MethodPut, url, viewFn)
}
// PATCH shortcut for router.Path("PATCH", url, viewFn).
func (r *Router) PATCH(url string, viewFn View) *Path {
return r.Path(fasthttp.MethodPatch, url, viewFn)
}
// DELETE shortcut for router.Path("DELETE", url, viewFn).
func (r *Router) DELETE(url string, viewFn View) *Path {
return r.Path(fasthttp.MethodDelete, url, viewFn)
}
// ANY shortcut for router.Path("*", url, viewFn)
//
// WARNING: Use only for routes where the request method is not important.
func (r *Router) ANY(url string, viewFn View) *Path {
return r.Path(fastrouter.MethodWild, url, viewFn)
}
// RequestHandlerPath wraps fasthttp request handler to atreugo view and registers it to
// the given path and method.
func (r *Router) RequestHandlerPath(method, url string, handler fasthttp.RequestHandler) *Path {
viewFn := func(ctx *RequestCtx) error {
handler(ctx.RequestCtx)
return nil
}
return r.Path(method, url, viewFn)
}
// NetHTTPPath wraps net/http handler to atreugo view and registers it to
// the given path and method.
//
// While this function may be used for easy switching from net/http to fasthttp/atreugo,
// it has the following drawbacks compared to using a manually written
// fasthttp/atreugo request handler:
//
// - A lot of useful functionality provided by fasthttp/atreugo is missing
// from the net/http handler.
// - net/http -> fasthttp/atreugo handler conversion has some overhead,
// so the returned handler will always be slower than a manually written
// fasthttp/atreugo handler.
//
// So it is advisable to use this function only for quick net/http -> fasthttp
// switching. Then manually convert net/http handlers to fasthttp handlers
// according to https://github.com/valyala/fasthttp#switching-from-nethttp-to-fasthttp.
func (r *Router) NetHTTPPath(method, url string, handler http.Handler) *Path {
h := fasthttpadaptor.NewFastHTTPHandler(handler)
return r.RequestHandlerPath(method, url, h)
}
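// Illustrative only (the handler below is a stand-in):
//
//	legacy := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
//		fmt.Fprint(w, "from net/http")
//	})
//	r.NetHTTPPath("GET", "/legacy", legacy)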
// Static serves static files from the given file system root
//
// Make sure your program has enough 'max open files' limit aka
// 'ulimit -n' if root folder contains many files.
func (r *Router) Static(url, rootPath string) *Path {
return r.StaticCustom(url, & | {
path = r.parent.getGroupFullPath(r.prefix + path)
} | conditional_block |
router.go | (v bool) {
if v != r.routerMutable {
r.routerMutable = v
r.router.Mutable(v)
}
}
func (r *Router) buildMiddlewares(m Middlewares) Middlewares {
m2 := Middlewares{}
m2.Before = append(m2.Before, r.middlewares.Before...)
m2.Before = append(m2.Before, m.Before...)
m2.After = append(m2.After, m.After...)
m2.After = append(m2.After, r.middlewares.After...)
m2.Skip = append(m2.Skip, m.Skip...)
m2.Skip = append(m2.Skip, r.middlewares.Skip...)
m2.Final = append(m2.Final, m.Final...)
m2.Final = append(m2.Final, r.middlewares.Final...)
if r.parent != nil {
return r.parent.buildMiddlewares(m2)
}
m2.Before = appendMiddlewares(m2.Before[:0], m2.Before, m2.Skip...)
m2.After = appendMiddlewares(m2.After[:0], m2.After, m2.Skip...)
return m2
}
func (r *Router) getGroupFullPath(path string) string {
if r.parent != nil {
path = r.parent.getGroupFullPath(r.prefix + path)
}
return path
}
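// Illustrative only: for nested groups created as
// r1 := root.NewGroupPath("/api") and r2 := r1.NewGroupPath("/admin"),
// calling getGroupFullPath("/users") on r2 walks up the parent chain and
// yields "/api/admin/users".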
func (r *Router) handler(fn View, middle Middlewares) fasthttp.RequestHandler {
middle = r.buildMiddlewares(middle)
chain := make([]Middleware, 0)
chain = append(chain, middle.Before...)
chain = append(chain, func(ctx *RequestCtx) error {
if !ctx.skipView {
if err := fn(ctx); err != nil {
return err
}
}
return ctx.Next()
})
chain = append(chain, middle.After...)
chainLen := len(chain)
return func(ctx *fasthttp.RequestCtx) {
actx := AcquireRequestCtx(ctx)
for i := 0; i < chainLen; i++ {
if err := chain[i](actx); err != nil {
r.handleMiddlewareError(actx, err)
break
} else if !actx.next {
break
}
actx.next = false
}
for _, final := range middle.Final {
final(actx)
}
ReleaseRequestCtx(actx)
}
}
func (r *Router) handleMiddlewareError(ctx *RequestCtx, err error) {
statusCode := ctx.Response.Header.StatusCode()
if statusCode == fasthttp.StatusOK {
statusCode = fasthttp.StatusInternalServerError
}
r.errorView(ctx, err, statusCode)
}
func (r *Router) handlePath(p *Path) {
isOPTIONS := p.method == fasthttp.MethodOptions
switch {
case p.registered:
r.mutable(true)
case isOPTIONS:
mutable := !gstrings.Include(r.customOPTIONS, p.fullURL)
r.mutable(mutable)
case r.routerMutable:
r.mutable(false)
}
view := p.view
if isOPTIONS {
view = buildOptionsView(p.fullURL, view, r.ListPaths())
r.customOPTIONS = gstrings.UniqueAppend(r.customOPTIONS, p.fullURL)
}
handler := r.handler(view, p.middlewares)
if p.withTimeout {
handler = fasthttp.TimeoutWithCodeHandler(handler, p.timeout, p.timeoutMsg, p.timeoutCode)
}
handleFunc := r.router.Handle
if r.group != nil {
handleFunc = r.group.Handle
}
handleFunc(p.method, p.url, handler)
if r.handleOPTIONS && !p.registered && !isOPTIONS {
view = buildOptionsView(p.fullURL, emptyView, r.ListPaths())
handler = r.handler(view, p.middlewares)
r.mutable(true)
handleFunc(fasthttp.MethodOptions, p.url, handler)
}
}
// NewGroupPath returns a new router to group paths.
func (r *Router) | (path string) *Router {
groupFunc := r.router.Group
if r.group != nil {
groupFunc = r.group.Group
}
return &Router{
parent: r,
router: r.router,
routerMutable: r.routerMutable,
errorView: r.errorView,
prefix: path,
group: groupFunc(path),
handleOPTIONS: r.handleOPTIONS,
}
}
// ListPaths returns all registered routes grouped by method.
func (r *Router) ListPaths() map[string][]string {
return r.router.List()
}
// Middlewares defines the middlewares (before, after, skip and final) in the order in which you want to execute them
// for the view or group
//
// WARNING: The previous middlewares configuration could be overridden.
func (r *Router) Middlewares(middlewares Middlewares) *Router {
r.middlewares = middlewares
return r
}
// UseBefore registers the middlewares in the order in which you want to execute them
// before the execution of the view or group.
func (r *Router) UseBefore(fns ...Middleware) *Router {
r.middlewares.Before = append(r.middlewares.Before, fns...)
return r
}
// UseAfter registers the middlewares in the order in which you want to execute them
// after the execution of the view or group.
func (r *Router) UseAfter(fns ...Middleware) *Router {
r.middlewares.After = append(r.middlewares.After, fns...)
return r
}
// UseFinal registers the given middlewares to be executed in the order in which they are added,
// after the view or group has been executed. These middlewares will always be executed,
// even if a previous middleware or the view/group returned a response.
func (r *Router) UseFinal(fns ...FinalMiddleware) *Router {
r.middlewares.Final = append(r.middlewares.Final, fns...)
return r
}
// SkipMiddlewares registers the middlewares that you want to skip when executing the view or group.
func (r *Router) SkipMiddlewares(fns ...Middleware) *Router {
r.middlewares.Skip = append(r.middlewares.Skip, fns...)
return r
}
// GET shortcut for router.Path("GET", url, viewFn).
func (r *Router) GET(url string, viewFn View) *Path {
return r.Path(fasthttp.MethodGet, url, viewFn)
}
// HEAD shortcut for router.Path("HEAD", url, viewFn).
func (r *Router) HEAD(url string, viewFn View) *Path {
return r.Path(fasthttp.MethodHead, url, viewFn)
}
// OPTIONS shortcut for router.Path("OPTIONS", url, viewFn).
func (r *Router) OPTIONS(url string, viewFn View) *Path {
return r.Path(fasthttp.MethodOptions, url, viewFn)
}
// POST shortcut for router.Path("POST", url, viewFn).
func (r *Router) POST(url string, viewFn View) *Path {
return r.Path(fasthttp.MethodPost, url, viewFn)
}
// PUT shortcut for router.Path("PUT", url, viewFn).
func (r *Router) PUT(url string, viewFn View) *Path {
return r.Path(fasthttp.MethodPut, url, viewFn)
}
// PATCH shortcut for router.Path("PATCH", url, viewFn).
func (r *Router) PATCH(url string, viewFn View) *Path {
return r.Path(fasthttp.MethodPatch, url, viewFn)
}
// DELETE shortcut for router.Path("DELETE", url, viewFn).
func (r *Router) DELETE(url string, viewFn View) *Path {
return r.Path(fasthttp.MethodDelete, url, viewFn)
}
// ANY shortcut for router.Path("*", url, viewFn)
//
// WARNING: Use only for routes where the request method is not important.
func (r *Router) ANY(url string, viewFn View) *Path {
return r.Path(fastrouter.MethodWild, url, viewFn)
}
// RequestHandlerPath wraps fasthttp request handler to atreugo view and registers it to
// the given path and method.
func (r *Router) RequestHandlerPath(method, url string, handler fasthttp.RequestHandler) *Path {
viewFn := func(ctx *RequestCtx) error {
handler(ctx.RequestCtx)
return nil
}
return r.Path(method, url, viewFn)
}
// NetHTTPPath wraps net/http handler to atreugo view and registers it to
// the given path and method.
//
// While this function may be used for easy switching from net/http to fasthttp/atreugo,
// it has the following drawbacks compared to using a manually written
// fasthttp/atreugo request handler:
//
// - A lot of useful functionality provided by fasthttp/atreugo is missing
// from the net/http handler.
// - net/http -> fasthttp/atreugo handler conversion has some overhead,
// so the returned handler will always be slower than a manually written
// fasthttp/atreugo handler.
//
// So it is advisable to use this function only for quick net/http -> fasthttp
// switching. Then manually convert net/http handlers to fasthttp handlers
// according to https://github.com/valyala/fasthttp#switching-from-nethttp-to-fasthttp.
func (r *Router) NetHTTPPath(method, url string, handler http.Handler) *Path {
h := fasthttpadaptor.NewFastHTTPHandler(handler)
return r.RequestHandlerPath(method, url, h)
}
// Static serves static files from the given file system root
//
// Make sure your program has enough 'max open files' limit aka
// 'ulimit -n' if root folder contains many files.
func (r *Router) Static(url, rootPath string) *Path {
return r.StaticCustom(url, &Static | NewGroupPath | identifier_name |
router.go | mutable(v bool) {
if v != r.routerMutable {
r.routerMutable = v
r.router.Mutable(v)
}
}
func (r *Router) buildMiddlewares(m Middlewares) Middlewares {
m2 := Middlewares{}
m2.Before = append(m2.Before, r.middlewares.Before...)
m2.Before = append(m2.Before, m.Before...)
m2.After = append(m2.After, m.After...)
m2.After = append(m2.After, r.middlewares.After...)
m2.Skip = append(m2.Skip, m.Skip...)
m2.Skip = append(m2.Skip, r.middlewares.Skip...)
m2.Final = append(m2.Final, m.Final...)
m2.Final = append(m2.Final, r.middlewares.Final...)
if r.parent != nil {
return r.parent.buildMiddlewares(m2)
}
m2.Before = appendMiddlewares(m2.Before[:0], m2.Before, m2.Skip...)
m2.After = appendMiddlewares(m2.After[:0], m2.After, m2.Skip...)
return m2
}
func (r *Router) getGroupFullPath(path string) string {
if r.parent != nil {
path = r.parent.getGroupFullPath(r.prefix + path)
}
return path
}
func (r *Router) handler(fn View, middle Middlewares) fasthttp.RequestHandler {
middle = r.buildMiddlewares(middle)
chain := make([]Middleware, 0)
chain = append(chain, middle.Before...)
chain = append(chain, func(ctx *RequestCtx) error {
if !ctx.skipView {
if err := fn(ctx); err != nil {
return err
}
}
return ctx.Next()
})
chain = append(chain, middle.After...)
chainLen := len(chain)
return func(ctx *fasthttp.RequestCtx) {
actx := AcquireRequestCtx(ctx)
for i := 0; i < chainLen; i++ {
if err := chain[i](actx); err != nil {
r.handleMiddlewareError(actx, err)
break
} else if !actx.next {
break
}
actx.next = false
}
for _, final := range middle.Final {
final(actx)
}
ReleaseRequestCtx(actx)
}
}
func (r *Router) handleMiddlewareError(ctx *RequestCtx, err error) {
statusCode := ctx.Response.Header.StatusCode()
if statusCode == fasthttp.StatusOK {
statusCode = fasthttp.StatusInternalServerError
}
r.errorView(ctx, err, statusCode)
}
func (r *Router) handlePath(p *Path) {
isOPTIONS := p.method == fasthttp.MethodOptions
switch {
case p.registered:
r.mutable(true)
case isOPTIONS:
mutable := !gstrings.Include(r.customOPTIONS, p.fullURL)
r.mutable(mutable)
case r.routerMutable:
r.mutable(false)
}
view := p.view
if isOPTIONS {
view = buildOptionsView(p.fullURL, view, r.ListPaths())
r.customOPTIONS = gstrings.UniqueAppend(r.customOPTIONS, p.fullURL)
}
handler := r.handler(view, p.middlewares)
if p.withTimeout {
handler = fasthttp.TimeoutWithCodeHandler(handler, p.timeout, p.timeoutMsg, p.timeoutCode)
}
handleFunc := r.router.Handle
if r.group != nil {
handleFunc = r.group.Handle
}
handleFunc(p.method, p.url, handler)
if r.handleOPTIONS && !p.registered && !isOPTIONS {
view = buildOptionsView(p.fullURL, emptyView, r.ListPaths())
handler = r.handler(view, p.middlewares)
r.mutable(true)
handleFunc(fasthttp.MethodOptions, p.url, handler)
}
}
// NewGroupPath returns a new router to group paths.
func (r *Router) NewGroupPath(path string) *Router {
groupFunc := r.router.Group
if r.group != nil {
groupFunc = r.group.Group
}
return &Router{
parent: r,
router: r.router,
routerMutable: r.routerMutable,
errorView: r.errorView,
prefix: path,
group: groupFunc(path),
handleOPTIONS: r.handleOPTIONS,
}
}
// ListPaths returns all registered routes grouped by method.
func (r *Router) ListPaths() map[string][]string {
return r.router.List() | // Middlewares defines the middlewares (before, after and skip) in the order in which you want to execute them
// for the view or group
//
// WARNING: The previous middlewares configuration could be overridden.
func (r *Router) Middlewares(middlewares Middlewares) *Router {
r.middlewares = middlewares
return r
}
// UseBefore registers the middlewares in the order in which you want to execute them
// before the execution of the view or group.
func (r *Router) UseBefore(fns ...Middleware) *Router {
r.middlewares.Before = append(r.middlewares.Before, fns...)
return r
}
// UseAfter registers the middlewares in the order in which you want to execute them
// after the execution of the view or group.
func (r *Router) UseAfter(fns ...Middleware) *Router {
r.middlewares.After = append(r.middlewares.After, fns...)
return r
}
// UseFinal registers the given middlewares to be executed in the order in which they are added,
// after the view or group has been executed. These middlewares will always be executed,
// even if a previous middleware or the view/group returned a response.
func (r *Router) UseFinal(fns ...FinalMiddleware) *Router {
r.middlewares.Final = append(r.middlewares.Final, fns...)
return r
}
// SkipMiddlewares registers the middlewares that you want to skip when executing the view or group.
func (r *Router) SkipMiddlewares(fns ...Middleware) *Router {
r.middlewares.Skip = append(r.middlewares.Skip, fns...)
return r
}
// GET shortcut for router.Path("GET", url, viewFn).
func (r *Router) GET(url string, viewFn View) *Path {
return r.Path(fasthttp.MethodGet, url, viewFn)
}
// HEAD shortcut for router.Path("HEAD", url, viewFn).
func (r *Router) HEAD(url string, viewFn View) *Path {
return r.Path(fasthttp.MethodHead, url, viewFn)
}
// OPTIONS shortcut for router.Path("OPTIONS", url, viewFn).
func (r *Router) OPTIONS(url string, viewFn View) *Path {
return r.Path(fasthttp.MethodOptions, url, viewFn)
}
// POST shortcut for router.Path("POST", url, viewFn).
func (r *Router) POST(url string, viewFn View) *Path {
return r.Path(fasthttp.MethodPost, url, viewFn)
}
// PUT shortcut for router.Path("PUT", url, viewFn).
func (r *Router) PUT(url string, viewFn View) *Path {
return r.Path(fasthttp.MethodPut, url, viewFn)
}
// PATCH shortcut for router.Path("PATCH", url, viewFn).
func (r *Router) PATCH(url string, viewFn View) *Path {
return r.Path(fasthttp.MethodPatch, url, viewFn)
}
// DELETE shortcut for router.Path("DELETE", url, viewFn).
func (r *Router) DELETE(url string, viewFn View) *Path {
return r.Path(fasthttp.MethodDelete, url, viewFn)
}
// ANY shortcut for router.Path("*", url, viewFn)
//
// WARNING: Use only for routes where the request method is not important.
func (r *Router) ANY(url string, viewFn View) *Path {
return r.Path(fastrouter.MethodWild, url, viewFn)
}
// RequestHandlerPath wraps fasthttp request handler to atreugo view and registers it to
// the given path and method.
func (r *Router) RequestHandlerPath(method, url string, handler fasthttp.RequestHandler) *Path {
viewFn := func(ctx *RequestCtx) error {
handler(ctx.RequestCtx)
return nil
}
return r.Path(method, url, viewFn)
}
// NetHTTPPath wraps net/http handler to atreugo view and registers it to
// the given path and method.
//
// While this function may be used for easy switching from net/http to fasthttp/atreugo,
// it has the following drawbacks compared to using a manually written
// fasthttp/atreugo request handler:
//
// - A lot of useful functionality provided by fasthttp/atreugo is missing
// from the net/http handler.
// - net/http -> fasthttp/atreugo handler conversion has some overhead,
// so the returned handler will always be slower than a manually written
// fasthttp/atreugo handler.
//
// So it is advisable to use this function only for quick net/http -> fasthttp
// switching. Then manually convert net/http handlers to fasthttp handlers
// according to https://github.com/valyala/fasthttp#switching-from-nethttp-to-fasthttp.
func (r *Router) NetHTTPPath(method, url string, handler http.Handler) *Path {
h := fasthttpadaptor.NewFastHTTPHandler(handler)
return r.RequestHandlerPath(method, url, h)
}
// Static serves static files from the given file system root
//
// Make sure your program has enough 'max open files' limit aka
// 'ulimit -n' if root folder contains many files.
func (r *Router) Static(url, rootPath string) *Path {
return r.StaticCustom(url, &Static | }
| random_line_split |
router.go | (v bool) |
func (r *Router) buildMiddlewares(m Middlewares) Middlewares {
m2 := Middlewares{}
m2.Before = append(m2.Before, r.middlewares.Before...)
m2.Before = append(m2.Before, m.Before...)
m2.After = append(m2.After, m.After...)
m2.After = append(m2.After, r.middlewares.After...)
m2.Skip = append(m2.Skip, m.Skip...)
m2.Skip = append(m2.Skip, r.middlewares.Skip...)
m2.Final = append(m2.Final, m.Final...)
m2.Final = append(m2.Final, r.middlewares.Final...)
if r.parent != nil {
return r.parent.buildMiddlewares(m2)
}
m2.Before = appendMiddlewares(m2.Before[:0], m2.Before, m2.Skip...)
m2.After = appendMiddlewares(m2.After[:0], m2.After, m2.Skip...)
return m2
}
func (r *Router) getGroupFullPath(path string) string {
if r.parent != nil {
path = r.parent.getGroupFullPath(r.prefix + path)
}
return path
}
func (r *Router) handler(fn View, middle Middlewares) fasthttp.RequestHandler {
middle = r.buildMiddlewares(middle)
chain := make([]Middleware, 0)
chain = append(chain, middle.Before...)
chain = append(chain, func(ctx *RequestCtx) error {
if !ctx.skipView {
if err := fn(ctx); err != nil {
return err
}
}
return ctx.Next()
})
chain = append(chain, middle.After...)
chainLen := len(chain)
return func(ctx *fasthttp.RequestCtx) {
actx := AcquireRequestCtx(ctx)
for i := 0; i < chainLen; i++ {
if err := chain[i](actx); err != nil {
r.handleMiddlewareError(actx, err)
break
} else if !actx.next {
break
}
actx.next = false
}
for _, final := range middle.Final {
final(actx)
}
ReleaseRequestCtx(actx)
}
}
func (r *Router) handleMiddlewareError(ctx *RequestCtx, err error) {
statusCode := ctx.Response.Header.StatusCode()
if statusCode == fasthttp.StatusOK {
statusCode = fasthttp.StatusInternalServerError
}
r.errorView(ctx, err, statusCode)
}
func (r *Router) handlePath(p *Path) {
isOPTIONS := p.method == fasthttp.MethodOptions
switch {
case p.registered:
r.mutable(true)
case isOPTIONS:
mutable := !gstrings.Include(r.customOPTIONS, p.fullURL)
r.mutable(mutable)
case r.routerMutable:
r.mutable(false)
}
view := p.view
if isOPTIONS {
view = buildOptionsView(p.fullURL, view, r.ListPaths())
r.customOPTIONS = gstrings.UniqueAppend(r.customOPTIONS, p.fullURL)
}
handler := r.handler(view, p.middlewares)
if p.withTimeout {
handler = fasthttp.TimeoutWithCodeHandler(handler, p.timeout, p.timeoutMsg, p.timeoutCode)
}
handleFunc := r.router.Handle
if r.group != nil {
handleFunc = r.group.Handle
}
handleFunc(p.method, p.url, handler)
if r.handleOPTIONS && !p.registered && !isOPTIONS {
view = buildOptionsView(p.fullURL, emptyView, r.ListPaths())
handler = r.handler(view, p.middlewares)
r.mutable(true)
handleFunc(fasthttp.MethodOptions, p.url, handler)
}
}
// NewGroupPath returns a new router to group paths.
func (r *Router) NewGroupPath(path string) *Router {
groupFunc := r.router.Group
if r.group != nil {
groupFunc = r.group.Group
}
return &Router{
parent: r,
router: r.router,
routerMutable: r.routerMutable,
errorView: r.errorView,
prefix: path,
group: groupFunc(path),
handleOPTIONS: r.handleOPTIONS,
}
}
// ListPaths returns all registered routes grouped by method.
func (r *Router) ListPaths() map[string][]string {
return r.router.List()
}
// Middlewares defines the middlewares (before, after, skip and final) in the order in which you want to execute them
// for the view or group
//
// WARNING: The previous middlewares configuration could be overridden.
func (r *Router) Middlewares(middlewares Middlewares) *Router {
r.middlewares = middlewares
return r
}
// UseBefore registers the middlewares in the order in which you want to execute them
// before the execution of the view or group.
func (r *Router) UseBefore(fns ...Middleware) *Router {
r.middlewares.Before = append(r.middlewares.Before, fns...)
return r
}
// UseAfter registers the middlewares in the order in which you want to execute them
// after the execution of the view or group.
func (r *Router) UseAfter(fns ...Middleware) *Router {
r.middlewares.After = append(r.middlewares.After, fns...)
return r
}
// UseFinal registers the given middlewares to be executed in the order in which they are added,
// after the view or group has been executed. These middlewares will always be executed,
// even if a previous middleware or the view/group returned a response.
func (r *Router) UseFinal(fns ...FinalMiddleware) *Router {
r.middlewares.Final = append(r.middlewares.Final, fns...)
return r
}
// SkipMiddlewares registers the middlewares that you want to skip when executing the view or group.
func (r *Router) SkipMiddlewares(fns ...Middleware) *Router {
r.middlewares.Skip = append(r.middlewares.Skip, fns...)
return r
}
// GET shortcut for router.Path("GET", url, viewFn).
func (r *Router) GET(url string, viewFn View) *Path {
return r.Path(fasthttp.MethodGet, url, viewFn)
}
// HEAD shortcut for router.Path("HEAD", url, viewFn).
func (r *Router) HEAD(url string, viewFn View) *Path {
return r.Path(fasthttp.MethodHead, url, viewFn)
}
// OPTIONS shortcut for router.Path("OPTIONS", url, viewFn).
func (r *Router) OPTIONS(url string, viewFn View) *Path {
return r.Path(fasthttp.MethodOptions, url, viewFn)
}
// POST shortcut for router.Path("POST", url, viewFn).
func (r *Router) POST(url string, viewFn View) *Path {
return r.Path(fasthttp.MethodPost, url, viewFn)
}
// PUT shortcut for router.Path("PUT", url, viewFn).
func (r *Router) PUT(url string, viewFn View) *Path {
return r.Path(fasthttp.MethodPut, url, viewFn)
}
// PATCH shortcut for router.Path("PATCH", url, viewFn).
func (r *Router) PATCH(url string, viewFn View) *Path {
return r.Path(fasthttp.MethodPatch, url, viewFn)
}
// DELETE shortcut for router.Path("DELETE", url, viewFn).
func (r *Router) DELETE(url string, viewFn View) *Path {
return r.Path(fasthttp.MethodDelete, url, viewFn)
}
// ANY shortcut for router.Path("*", url, viewFn)
//
// WARNING: Use only for routes where the request method is not important.
func (r *Router) ANY(url string, viewFn View) *Path {
return r.Path(fastrouter.MethodWild, url, viewFn)
}
// RequestHandlerPath wraps fasthttp request handler to atreugo view and registers it to
// the given path and method.
func (r *Router) RequestHandlerPath(method, url string, handler fasthttp.RequestHandler) *Path {
viewFn := func(ctx *RequestCtx) error {
handler(ctx.RequestCtx)
return nil
}
return r.Path(method, url, viewFn)
}
// NetHTTPPath wraps net/http handler to atreugo view and registers it to
// the given path and method.
//
// While this function may be used for easy switching from net/http to fasthttp/atreugo,
// it has the following drawbacks comparing to using manually written fasthttp/atreugo,
// request handler:
//
// - A lot of useful functionality provided by fasthttp/atreugo is missing
// from net/http handler.
// - net/http -> fasthttp/atreugo handler conversion has some overhead,
// so the returned handler will be always slower than manually written
// fasthttp/atreugo handler.
//
// So it is advisable using this function only for quick net/http -> fasthttp
// switching. Then manually convert net/http handlers to fasthttp handlers.
// according to https://github.com/valyala/fasthttp#switching-from-nethttp-to-fasthttp.
func (r *Router) NetHTTPPath(method, url string, handler http.Handler) *Path {
h := fasthttpadaptor.NewFastHTTPHandler(handler)
return r.RequestHandlerPath(method, url, h)
}
// Static serves static files from the given file system root
//
// Make sure your program has enough 'max open files' limit aka
// 'ulimit -n' if root folder contains many files.
func (r *Router) Static(url, rootPath string) *Path {
return r.StaticCustom(url, & | {
if v != r.routerMutable {
r.routerMutable = v
r.router.Mutable(v)
}
} | identifier_body |
ciao-vendor.go | r.Version, r.License)
}
w.Flush()
return nil
}
func uses(pkg string, projectRoot string, direct bool) error {
deps, err := calcDeps(projectRoot, []string{"./..."})
if err != nil {
return err
}
var output bytes.Buffer
cmd := exec.Command("go", "list", "./...")
cmd.Stdout = &output
err = cmd.Run()
if err != nil {
return fmt.Errorf("go list failed: %v", err)
}
scanner := bufio.NewScanner(&output)
vendorPrefix := path.Join(projectRoot, "vendor")
for scanner.Scan() {
d := scanner.Text()
if !strings.HasPrefix(d, vendorPrefix) {
deps = append(deps, &packageInfo{name: d})
}
}
var template string
if direct {
template = directTemplate
} else {
template = listTemplate
}
clientCh := make(chan clientInfo)
for _, d := range deps {
go func(name string) {
ci := clientInfo{}
pd, err := getPackageDependencies([]string{name}, template)
if err == nil {
if _, ok := pd[pkg]; ok {
ci.name = name
}
} else {
ci.err = err
}
clientCh <- ci
}(d.name)
}
clients := make([]string, 0, len(deps))
for range deps {
clientInfo := <-clientCh
if clientInfo.err != nil {
return err
}
if clientInfo.name != "" {
clients = append(clients, clientInfo.name)
}
}
sort.Strings(clients)
for _, client := range clients {
fmt.Println(client)
}
return nil
}
func updates(sourceRoot, projectRoot string) error {
deps, err := calcDeps(projectRoot, []string{"./..."})
if err != nil {
return err
}
vendorRoot := path.Join(projectRoot, "vendor") + "/"
for _, d := range deps {
if strings.HasPrefix(d.name, vendorRoot) {
d.name = d.name[len(vendorRoot):]
}
}
err = updateNonVendoredDeps(deps, projectRoot)
if err != nil {
return err
}
w := new(tabwriter.Writer)
w.Init(os.Stdout, 0, 8, 1, '\t', 0)
fmt.Fprintln(w, "Package\tStatus\t")
keys := make([]string, 0, len(repos))
for k := range repos {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
v := repos[k]
var output bytes.Buffer
cmd := exec.Command("git", "log", "--oneline", fmt.Sprintf("%s..HEAD", v.Version))
cmd.Stdout = &output
cmd.Dir = path.Join(sourceRoot, k)
err = cmd.Run()
if err != nil {
fmt.Fprintf(w, "%s\tUnknown: %v\t\n", k, err)
continue
}
scanner := bufio.NewScanner(&output)
count := 0
for scanner.Scan() {
count++
}
if count != 0 {
fmt.Fprintf(w, "%s\t%d commits behind HEAD\t\n", k, count)
} else {
fmt.Fprintf(w, "%s\tUp to date\t\n", k)
}
}
w.Flush()
return nil
}
func test(sudo bool, sourceRoot, projectRoot, pkg, version string, goTestFlags []string) error {
fmt.Printf("Go getting %s\n", pkg)
cmd := exec.Command("go", "get", "-t", "-u", pkg)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
err := cmd.Run()
if err != nil {
return fmt.Errorf("Unable to go get %s", pkg)
}
branch, err := getCurrentBranch(path.Join(sourceRoot, pkg))
if err != nil {
return fmt.Errorf("Unable to determine current branch of %s: %v", pkg, err)
}
cmd = exec.Command("git", "checkout", version)
cmd.Dir = path.Join(sourceRoot, pkg)
err = cmd.Run()
if err != nil {
return fmt.Errorf("Unable to checkout version %s of %s: %v",
version, pkg, err)
}
var args []string
var command string
if sudo {
command = "sudo"
args = []string{"-E", "go"}
} else {
command = "go"
}
args = append(args, "test")
args = append(args, goTestFlags...)
args = append(args, pkg)
cmd = exec.Command(command, args...)
cmd.Dir = path.Join(sourceRoot, pkg)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if sudo {
cmd.Stdin = os.Stdin
}
err = cmd.Run()
cmd = exec.Command("git", "checkout", branch)
cmd.Dir = path.Join(sourceRoot, pkg)
_ = cmd.Run()
return err
}
func revendor(cwd, sourceRoot, projectRoot, repo, version string) error {
ri, ok := repos[repo]
if !ok {
return fmt.Errorf("%s is not a vendored repository", repo)
}
fmt.Printf("Go getting %s\n", repo)
cmd := exec.Command("go", "get", "-v", "-u", "-d", repo+"/...")
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
err := cmd.Run()
if err != nil {
return fmt.Errorf("Unable to go get %s", repo)
}
ri.Version = version
repos[repo] = ri
err = writeRepos(cwd)
if err != nil {
return err
}
vendoredDir := path.Join(cwd, "vendor", repo)
err = os.RemoveAll(vendoredDir)
if err != nil {
return fmt.Errorf("Unable to remove vendored directory %s : %v",
vendoredDir, err)
}
return vendor(cwd, projectRoot, sourceRoot)
}
func vendorNew(cwd, sourceRoot, projectRoot, repo string, ri repoInfo) error {
_, ok := repos[repo]
if ok {
return fmt.Errorf("%s is already vendored", repo)
}
repos[repo] = ri
if err := writeRepos(cwd); err != nil {
return err
}
return vendor(cwd, projectRoot, sourceRoot)
}
func unvendor(cwd, sourceRoot, projectRoot, repo string) error {
_, ok := repos[repo]
if !ok {
return fmt.Errorf("%s is not vendored", repo)
}
delete(repos, repo)
if err := writeRepos(cwd); err != nil {
return err
}
vendoredDir := path.Join(cwd, "vendor", repo)
err := os.RemoveAll(vendoredDir)
if err != nil {
return fmt.Errorf("Unable to remove vendored directory %s : %v",
vendoredDir, err)
}
fmt.Printf("%s unvendored. Run go run ciao-vendor/ciao-vendor.go check to verify all is well\n", repo)
return nil
}
func runCommand(cwd, sourceRoot string, args []string) error {
var err error
projectRoot := cwd[len(sourceRoot)+1:]
switch args[1] {
case "check":
err = check(cwd, projectRoot)
case "vendor":
err = vendor(cwd, projectRoot, sourceRoot)
case "deps":
err = deps(projectRoot)
case "packages":
err = packages(cwd, projectRoot)
case "uses":
fs := flag.NewFlagSet("uses", flag.ExitOnError)
direct := false
fs.BoolVar(&direct, "d", false, "output direct dependencies only")
if err := fs.Parse(args[2:]); err != nil {
return err
}
if len(fs.Args()) == 0 {
return fmt.Errorf("Missing package for uses command")
}
err = uses(fs.Args()[0], projectRoot, direct)
case "updates":
err = updates(sourceRoot, projectRoot)
case "test":
fs := flag.NewFlagSet("test", flag.ExitOnError)
sudo := false
fs.BoolVar(&sudo, "s", false, "run tests with sudo")
if err := fs.Parse(args[2:]); err != nil {
return err
}
args = fs.Args()
err = test(sudo, sourceRoot, projectRoot, args[0], args[1], args[2:])
case "revendor":
err = revendor(cwd, sourceRoot, projectRoot, args[2], args[3])
case "vendornew":
ri := repoInfo{URL: args[5], Version: args[3], License: args[4]}
err = vendorNew(cwd, sourceRoot, projectRoot, args[2], ri)
case "unvendor":
err = unvendor(cwd, sourceRoot, projectRoot, args[2])
}
return err
}
func readRepos(projectRoot string) error {
packageFile := path.Join(projectRoot, "packages.json")
d, err := ioutil.ReadFile(packageFile)
if err != nil | {
if !os.IsNotExist(err) {
return fmt.Errorf("Unable to read %s : %v", packageFile, err)
}
return nil
} | conditional_block |
|
ciao-vendor.go | {
return err
}
vendorRoot := path.Join(projectRoot, "vendor")
missing, uninstalled, notVendored, notUsed := verify(deps, vendorRoot)
ok := checkKnown(missing, deps)
ok = checkUninstalled(uninstalled) && ok
ok = checkVendored(notVendored) && ok
ok = checkNotUsed(notUsed) && ok
if !ok {
return fmt.Errorf("Dependency checks failed")
}
return nil
}
func packages(cwd, projectRoot string) error {
uninstalledDeps := false
plist, err := calcDeps(projectRoot, []string{"./..."})
if err != nil {
return err
}
vendorRoot := path.Join(projectRoot, "vendor")
w := new(tabwriter.Writer)
w.Init(os.Stdout, 0, 8, 1, '\t', 0)
fmt.Fprintln(w, "Package\tStatus\tRepo\tVersion\tLicense")
for _, d := range plist {
fmt.Fprintf(w, "%s\t", d.name)
r := ""
for k := range repos {
if strings.HasPrefix(d.name, k) ||
(len(d.name) > len(vendorRoot)+1 &&
strings.HasPrefix(d.name[len(vendorRoot)+1:], k)) {
r = k
break
}
}
if d.vendored {
fmt.Fprintf(w, "Vendored\t")
} else if d.installed {
fmt.Fprintf(w, "GOPATH\t")
} else {
fmt.Fprintf(w, "Missing\t")
uninstalledDeps = true
}
if repos[r].URL != "" {
fmt.Fprintf(w, "%s\t", r)
if d.vendored {
fmt.Fprintf(w, "%s\t", repos[r].Version)
} else {
fmt.Fprintf(w, "master\t")
}
fmt.Fprintf(w, "%s", repos[r].License)
} else {
fmt.Fprintf(w, "Unknown\tUnknown\tUnknown")
}
fmt.Fprintln(w)
}
w.Flush()
if uninstalledDeps {
fmt.Println("")
return fmt.Errorf("Some dependencies are not installed. Unable to provide complete dependency list")
}
return nil
}
func deps(projectRoot string) error {
deps, err := calcDeps(projectRoot, []string{"./..."})
if err != nil {
return err
}
vendorRoot := path.Join(projectRoot, "vendor")
missing, uninstalled, notVendored, notUsed := verify(deps, vendorRoot)
if len(missing) != 0 || len(uninstalled) != 0 || len(notVendored) != 0 || len(notUsed) != 0 {
return fmt.Errorf("Dependencies out of sync. Please run go ciao-vendor/ciao-vendor.go check")
}
keys := make([]string, 0, len(repos))
for k := range repos {
keys = append(keys, k)
}
sort.Strings(keys)
w := new(tabwriter.Writer)
w.Init(os.Stdout, 0, 8, 1, '\t', 0)
fmt.Fprintln(w, "Package Root\tRepo\tVersion\tLicense")
for _, k := range keys {
r := repos[k]
fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", k, r.URL, r.Version, r.License)
}
w.Flush()
return nil
}
func uses(pkg string, projectRoot string, direct bool) error {
deps, err := calcDeps(projectRoot, []string{"./..."})
if err != nil {
return err
}
var output bytes.Buffer
cmd := exec.Command("go", "list", "./...")
cmd.Stdout = &output
err = cmd.Run()
if err != nil {
return fmt.Errorf("go list failed: %v", err)
}
scanner := bufio.NewScanner(&output)
vendorPrefix := path.Join(projectRoot, "vendor")
for scanner.Scan() {
d := scanner.Text()
if !strings.HasPrefix(d, vendorPrefix) {
deps = append(deps, &packageInfo{name: d})
}
}
var template string
if direct {
template = directTemplate
} else {
template = listTemplate
}
clientCh := make(chan clientInfo)
for _, d := range deps {
go func(name string) {
ci := clientInfo{}
pd, err := getPackageDependencies([]string{name}, template)
if err == nil {
if _, ok := pd[pkg]; ok {
ci.name = name
}
} else {
ci.err = err
}
clientCh <- ci
}(d.name)
}
clients := make([]string, 0, len(deps))
for range deps {
clientInfo := <-clientCh
if clientInfo.err != nil {
return err
}
if clientInfo.name != "" {
clients = append(clients, clientInfo.name)
}
}
sort.Strings(clients)
for _, client := range clients {
fmt.Println(client)
}
return nil
}
func updates(sourceRoot, projectRoot string) error {
deps, err := calcDeps(projectRoot, []string{"./..."})
if err != nil {
return err
}
vendorRoot := path.Join(projectRoot, "vendor") + "/"
for _, d := range deps {
if strings.HasPrefix(d.name, vendorRoot) {
d.name = d.name[len(vendorRoot):]
}
}
err = updateNonVendoredDeps(deps, projectRoot)
if err != nil {
return err
}
w := new(tabwriter.Writer)
w.Init(os.Stdout, 0, 8, 1, '\t', 0)
fmt.Fprintln(w, "Package\tStatus\t")
keys := make([]string, 0, len(repos))
for k := range repos {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
v := repos[k]
var output bytes.Buffer
cmd := exec.Command("git", "log", "--oneline", fmt.Sprintf("%s..HEAD", v.Version))
cmd.Stdout = &output
cmd.Dir = path.Join(sourceRoot, k)
err = cmd.Run()
if err != nil {
fmt.Fprintf(w, "%s\tUnknown: %v\t\n", k, err)
continue
}
scanner := bufio.NewScanner(&output)
count := 0
for scanner.Scan() {
count++
}
if count != 0 {
fmt.Fprintf(w, "%s\t%d commits behind HEAD\t\n", k, count)
} else {
fmt.Fprintf(w, "%s\tUp to date\t\n", k)
}
}
w.Flush()
return nil
}
func test(sudo bool, sourceRoot, projectRoot, pkg, version string, goTestFlags []string) error {
fmt.Printf("Go getting %s\n", pkg)
cmd := exec.Command("go", "get", "-t", "-u", pkg)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
err := cmd.Run()
if err != nil {
return fmt.Errorf("Unable to go get %s", pkg)
}
branch, err := getCurrentBranch(path.Join(sourceRoot, pkg))
if err != nil {
return fmt.Errorf("Unable to determine current branch of %s: %v", pkg, err)
}
cmd = exec.Command("git", "checkout", version)
cmd.Dir = path.Join(sourceRoot, pkg)
err = cmd.Run()
if err != nil {
return fmt.Errorf("Unable to checkout version %s of %s: %v",
version, pkg, err)
}
var args []string
var command string
if sudo {
command = "sudo"
args = []string{"-E", "go"}
} else {
command = "go"
}
args = append(args, "test")
args = append(args, goTestFlags...)
args = append(args, pkg)
cmd = exec.Command(command, args...)
cmd.Dir = path.Join(sourceRoot, pkg)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if sudo {
cmd.Stdin = os.Stdin
}
err = cmd.Run()
cmd = exec.Command("git", "checkout", branch)
cmd.Dir = path.Join(sourceRoot, pkg)
_ = cmd.Run()
return err
}
func revendor(cwd, sourceRoot, projectRoot, repo, version string) error {
ri, ok := repos[repo]
if !ok {
return fmt.Errorf("%s is not a vendored repository", repo)
}
fmt.Printf("Go getting %s\n", repo)
cmd := exec.Command("go", "get", "-v", "-u", "-d", repo+"/...")
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
err := cmd.Run()
if err != nil {
return fmt.Errorf("Unable to go get %s", repo)
}
ri.Version = version
repos[repo] = ri
err = writeRepos(cwd)
if err != nil {
return err
}
vendoredDir := path.Join(cwd, "vendor", repo)
err = os.RemoveAll(vendoredDir)
if err != nil {
return fmt.Errorf("Unable to remove vendored directory %s : %v",
vendoredDir, err)
}
return vendor(cwd, projectRoot, sourceRoot)
}
func | vendorNew | identifier_name |
|
ciao-vendor.go |
}
err = cmd.Start()
if err != nil {
return err
}
scanner := bufio.NewScanner(stdout)
for scanner.Scan() {
fmt.Println(scanner.Text())
}
err = cmd.Wait()
if err != nil {
return err
}
goGot[repoFound] = struct{}{}
}
return nil
}
func getCurrentBranch(repo string) (string, error) {
cmd := exec.Command("git", "symbolic-ref", "HEAD")
cmd.Dir = repo
output, err := cmd.Output()
if err != nil {
return "", err
}
scanner := bufio.NewScanner(bytes.NewBuffer(output))
if !scanner.Scan() {
return "", fmt.Errorf("Unable to determine current branch of %s",
repo)
}
branch := strings.TrimSpace(scanner.Text())
const prefix = "refs/heads/"
if !strings.HasPrefix(branch, prefix) {
return "", fmt.Errorf("Unable to determine current branch of %s",
repo)
}
return branch[len(prefix):], nil
}
func checkoutVersion(sourceRoot string) {
for k, v := range repos {
cmd := exec.Command("git", "checkout", v.Version)
cmd.Dir = path.Join(sourceRoot, k)
_ = cmd.Run()
}
}
func checkoutMaster(sourceRoot string) {
for k := range repos {
cmd := exec.Command("git", "checkout", "master")
cmd.Dir = path.Join(sourceRoot, k)
_ = cmd.Run()
}
}
func findDocs(dir, prefix string) ([]string, error) {
docs := make([]string, 0, 8)
docGlob := []string{
"LICENSE*",
"README*",
"NOTICE",
"MAINTAINERS*",
"PATENTS*",
"AUTHORS*",
"CONTRIBUTORS*",
"VERSION",
}
err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() && (dir != path) {
return filepath.SkipDir
}
for _, pattern := range docGlob {
match, err := filepath.Match(pattern, info.Name())
if err != nil {
return err
}
if match {
docs = append(docs, filepath.Join(prefix, info.Name()))
break
}
}
return nil
})
if err != nil {
return nil, err
}
return docs, nil
}
func computeSubPackages(deps piList) map[string][]*subPackage {
subPackages := make(map[string][]*subPackage)
for _, d := range deps {
for k := range repos {
if !strings.HasPrefix(d.name, k) {
continue
}
packages := subPackages[k]
pkg := d.name[len(k):]
if pkg == "" {
packages = append([]*subPackage{{name: k, wildcard: "*", cgo: d.CGO}}, packages...)
} else if pkg[0] == '/' {
packages = append(packages, &subPackage{name: d.name, wildcard: pkg[1:] + "/*", cgo: d.CGO})
} else {
fmt.Printf("Warning: unvendored package: %s\n", d.name)
}
subPackages[k] = packages
break
}
}
return subPackages
}
// This might look a little convoluted but we can't just go get
// on all the repos in repos, using a wildcard. This would build
// loads of stuff we're not interested in at best and at worst,
// breakage in a package we're not interested in would break
// ciao-vendor
//
// We can't just go get github.com/01org/ciao this would pull down
// the dependencies of the master version of ciao's depdendencies
// which is not what we want. This might miss some dependencies
// which have been deleted from the master branch of ciao's
// dependencies.
//
// So we need to figure out which dependencies ciao actually has,
// pull them down, check out the version of these dependencies
// that ciao actually uses, and then recompute our dependencies.
//
// Right now it's possible for a ciao dependency to have a dependency
// that is no longer present in master. This dependency will not be
// pulled down. If this happens, ciao-vendor vendor will need to be
// invoked again. We could probably fix this here.
func vendor(cwd, projectRoot, sourceRoot string) error {
checkoutVersion(sourceRoot)
deps, err := calcDeps(projectRoot, []string{"./..."})
if err != nil {
checkoutMaster(sourceRoot)
return err
}
i := 0
for ; i < len(deps); i++ {
if !deps[i].vendored {
break
}
}
if i < len(deps) {
checkoutMaster(sourceRoot)
err = updateNonVendoredDeps(deps, projectRoot)
if err != nil {
return err
}
checkoutVersion(sourceRoot)
deps, err = calcDeps(projectRoot, []string{"./..."})
if err != nil {
checkoutMaster(sourceRoot)
return err
}
}
subPackages := computeSubPackages(deps)
for k := range subPackages {
packages := subPackages[k]
for _, p := range packages {
dir := path.Join(sourceRoot, p.name)
prefix := p.name[len(k):]
if len(prefix) > 0 {
prefix = prefix[1:]
}
docs, err := findDocs(dir, prefix)
if err != nil {
checkoutMaster(sourceRoot)
return err
}
p.docs = docs
}
if packages[0].wildcard != "*" {
dir := path.Join(sourceRoot, k)
docs, err := findDocs(dir, "")
if err != nil {
checkoutMaster(sourceRoot)
return err
}
packages = append(packages, &subPackage{name: k, docs: docs})
}
subPackages[k] = packages
}
checkoutMaster(sourceRoot)
fmt.Println("Populating vendor folder")
err = copyRepos(cwd, sourceRoot, subPackages)
if err != nil {
return err
}
fmt.Println("Dependencies vendored. Run go run ciao-vendor/ciao-vendor.go check to verify all is well")
return nil
}
func usedBy(name string, packages piList, depsMap map[string][]string) string {
var users bytes.Buffer
for _, p := range packages {
if p.name == name {
continue
}
deps := depsMap[p.name]
for _, d := range deps {
if d == name {
users.WriteString(" ")
users.WriteString(p.name)
break
}
}
}
// BUG(markus): We don't report when a dependency is used by ciao if
// it is also used by a dependency
if users.Len() == 0 {
return "project"
}
return users.String()[1:]
}
func depsByPackage(packages piList) map[string][]string {
depsMap := make(map[string][]string)
depsCh := make(chan packageDeps)
for _, p := range packages {
go func(p string) {
var output bytes.Buffer
cmd := exec.Command("go", "list", "-f", listTemplate, p)
cmd.Stdout = &output
err := cmd.Run()
if err != nil {
fmt.Fprintf(os.Stderr, "Unable to call get list on %s : %v", p, err)
depsCh <- packageDeps{p: p}
return
}
scanner := bufio.NewScanner(&output)
deps := make([]string, 0, 32)
for scanner.Scan() {
deps = append(deps, scanner.Text())
}
depsCh <- packageDeps{p, deps}
}(p.name)
}
for range packages {
pkgDeps := <-depsCh
depsMap[pkgDeps.p] = pkgDeps.deps
}
return depsMap
}
func computeClients(packages piList) map[string]string {
depsMap := depsByPackage(packages)
clientMap := make(map[string]string) | }
return clientMap
}
func verify(deps piList, vendorRoot string) ([]string, []string, []string, []string) {
uninstalled := make([]string, 0, 128)
missing := make([]string, 0, 128)
notVendored := make([]string, 0, 128)
notUsed := make([]string, 0, 128)
reposUsed := make(map[string]struct{})
depLoop:
for _, d := range deps {
if !d.installed {
uninstalled = append(uninstalled, d.name)
}
for k := range repos {
if strings.HasPrefix(d.name, k) ||
(len(d.name) > len(vendorRoot | for _, p := range packages {
clientMap[p.name] = usedBy(p.name, packages, depsMap) | random_line_split |
ciao-vendor.go |
}
err = cmd.Start()
if err != nil {
return err
}
scanner := bufio.NewScanner(stdout)
for scanner.Scan() {
fmt.Println(scanner.Text())
}
err = cmd.Wait()
if err != nil {
return err
}
goGot[repoFound] = struct{}{}
}
return nil
}
func getCurrentBranch(repo string) (string, error) {
cmd := exec.Command("git", "symbolic-ref", "HEAD")
cmd.Dir = repo
output, err := cmd.Output()
if err != nil {
return "", err
}
scanner := bufio.NewScanner(bytes.NewBuffer(output))
if !scanner.Scan() {
return "", fmt.Errorf("Unable to determine current branch of %s",
repo)
}
branch := strings.TrimSpace(scanner.Text())
const prefix = "refs/heads/"
if !strings.HasPrefix(branch, prefix) {
return "", fmt.Errorf("Unable to determine current branch of %s",
repo)
}
return branch[len(prefix):], nil
}
func checkoutVersion(sourceRoot string) {
for k, v := range repos {
cmd := exec.Command("git", "checkout", v.Version)
cmd.Dir = path.Join(sourceRoot, k)
_ = cmd.Run()
}
}
func checkoutMaster(sourceRoot string) {
for k := range repos {
cmd := exec.Command("git", "checkout", "master")
cmd.Dir = path.Join(sourceRoot, k)
_ = cmd.Run()
}
}
func findDocs(dir, prefix string) ([]string, error) {
docs := make([]string, 0, 8)
docGlob := []string{
"LICENSE*",
"README*",
"NOTICE",
"MAINTAINERS*",
"PATENTS*",
"AUTHORS*",
"CONTRIBUTORS*",
"VERSION",
}
err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() && (dir != path) {
return filepath.SkipDir
}
for _, pattern := range docGlob {
match, err := filepath.Match(pattern, info.Name())
if err != nil {
return err
}
if match {
docs = append(docs, filepath.Join(prefix, info.Name()))
break
}
}
return nil
})
if err != nil {
return nil, err
}
return docs, nil
}
func computeSubPackages(deps piList) map[string][]*subPackage {
subPackages := make(map[string][]*subPackage)
for _, d := range deps {
for k := range repos {
if !strings.HasPrefix(d.name, k) {
continue
}
packages := subPackages[k]
pkg := d.name[len(k):]
if pkg == "" {
packages = append([]*subPackage{{name: k, wildcard: "*", cgo: d.CGO}}, packages...)
} else if pkg[0] == '/' {
packages = append(packages, &subPackage{name: d.name, wildcard: pkg[1:] + "/*", cgo: d.CGO})
} else {
fmt.Printf("Warning: unvendored package: %s\n", d.name)
}
subPackages[k] = packages
break
}
}
return subPackages
}
// This might look a little convoluted but we can't just go get
// on all the repos in repos, using a wildcard. This would build
// loads of stuff we're not interested in at best and at worst,
// breakage in a package we're not interested in would break
// ciao-vendor
//
// We can't just go get github.com/01org/ciao this would pull down
// the dependencies of the master version of ciao's depdendencies
// which is not what we want. This might miss some dependencies
// which have been deleted from the master branch of ciao's
// dependencies.
//
// So we need to figure out which dependencies ciao actually has,
// pull them down, check out the version of these dependencies
// that ciao actually uses, and then recompute our dependencies.
//
// Right now it's possible for a ciao dependency to have a dependency
// that is no longer present in master. This dependency will not be
// pulled down. If this happens, ciao-vendor vendor will need to be
// invoked again. We could probably fix this here.
func vendor(cwd, projectRoot, sourceRoot string) error {
checkoutVersion(sourceRoot)
deps, err := calcDeps(projectRoot, []string{"./..."})
if err != nil {
checkoutMaster(sourceRoot)
return err
}
i := 0
for ; i < len(deps); i++ {
if !deps[i].vendored {
break
}
}
if i < len(deps) {
checkoutMaster(sourceRoot)
err = updateNonVendoredDeps(deps, projectRoot)
if err != nil {
return err
}
checkoutVersion(sourceRoot)
deps, err = calcDeps(projectRoot, []string{"./..."})
if err != nil {
checkoutMaster(sourceRoot)
return err
}
}
subPackages := computeSubPackages(deps)
for k := range subPackages {
packages := subPackages[k]
for _, p := range packages {
dir := path.Join(sourceRoot, p.name)
prefix := p.name[len(k):]
if len(prefix) > 0 {
prefix = prefix[1:]
}
docs, err := findDocs(dir, prefix)
if err != nil {
checkoutMaster(sourceRoot)
return err
}
p.docs = docs
}
if packages[0].wildcard != "*" {
dir := path.Join(sourceRoot, k)
docs, err := findDocs(dir, "")
if err != nil {
checkoutMaster(sourceRoot)
return err
}
packages = append(packages, &subPackage{name: k, docs: docs})
}
subPackages[k] = packages
}
checkoutMaster(sourceRoot)
fmt.Println("Populating vendor folder")
err = copyRepos(cwd, sourceRoot, subPackages)
if err != nil {
return err
}
fmt.Println("Dependencies vendored. Run go run ciao-vendor/ciao-vendor.go check to verify all is well")
return nil
}
func usedBy(name string, packages piList, depsMap map[string][]string) string {
var users bytes.Buffer
for _, p := range packages {
if p.name == name {
continue
}
deps := depsMap[p.name]
for _, d := range deps {
if d == name {
users.WriteString(" ")
users.WriteString(p.name)
break
}
}
}
// BUG(markus): We don't report when a dependency is used by ciao if
// it is also used by a dependency
if users.Len() == 0 {
return "project"
}
return users.String()[1:]
}
func depsByPackage(packages piList) map[string][]string | }(p.name)
}
for range packages {
pkgDeps := <-depsCh
depsMap[pkgDeps.p] = pkgDeps.deps
}
return depsMap
}
func computeClients(packages piList) map[string]string {
depsMap := depsByPackage(packages)
clientMap := make(map[string]string)
for _, p := range packages {
clientMap[p.name] = usedBy(p.name, packages, depsMap)
}
return clientMap
}
func verify(deps piList, vendorRoot string) ([]string, []string, []string, []string) {
uninstalled := make([]string, 0, 128)
missing := make([]string, 0, 128)
notVendored := make([]string, 0, 128)
notUsed := make([]string, 0, 128)
reposUsed := make(map[string]struct{})
depLoop:
for _, d := range deps {
if !d.installed {
uninstalled = append(uninstalled, d.name)
}
for k := range repos {
if strings.HasPrefix(d.name, k) ||
(len(d.name) > len(vendor | {
depsMap := make(map[string][]string)
depsCh := make(chan packageDeps)
for _, p := range packages {
go func(p string) {
var output bytes.Buffer
cmd := exec.Command("go", "list", "-f", listTemplate, p)
cmd.Stdout = &output
err := cmd.Run()
if err != nil {
fmt.Fprintf(os.Stderr, "Unable to call get list on %s : %v", p, err)
depsCh <- packageDeps{p: p}
return
}
scanner := bufio.NewScanner(&output)
deps := make([]string, 0, 32)
for scanner.Scan() {
deps = append(deps, scanner.Text())
}
depsCh <- packageDeps{p, deps} | identifier_body |
newtoon.py | (error: Type[Exception], message: Optional[str] = None):
def decorator(func: Callable) -> Coroutine:
@wraps(func)
async def wrapper(*args, **kwargs) -> Optional[Any]:
try:
return await func(*args, **kwargs)
except error:
if message:
print(message)
return None
return wrapper
return decorator
class InMemoryZipFile:
def __init__(self, filename: str):
self.io = BytesIO()
self.cbz = zipfile.ZipFile(self.io, 'w', zipfile.ZIP_DEFLATED)
self.filename = filename
def __str__(self) -> str:
return self.filename
def exists(self) -> bool:
return os.path.exists(self.filename)
def write(self, filename: str, data: bytes) -> None:
self.cbz.writestr(filename, data)
def close(self):
self.cbz.close()
def save(self) -> None:
self.close()
self.io.seek(0)
with open(self.filename, 'wb') as fp:
fp.write(self.io.read())
class SeleniumMixin:
_driver: Optional[Union[webdriver.Firefox, webdriver.Chrome]] = None
_headless: bool = False
@classmethod
def get_new_marionette(cls, headless: bool = False) -> uc.Chrome:
print('Requesting a new marionette')
options = uc.ChromeOptions()
if headless:
options.headless = True
options.add_argument('--headless')
options.add_argument('--disable-gpu')
options.add_argument('--no-first-run --no-service-autorun --password-store=basic')
extenssions_paths = [
'/usr/lib/ublock-origin'
]
for extenssion in extenssions_paths:
options.add_argument(f'--load-extension={extenssion}')
driver = uc.Chrome(options=options)
print('Got new marionette.')
return driver
@property
def driver(self) -> Union[webdriver.Firefox, webdriver.Chrome]:
if not self._driver:
# self._driver = webdriver.Firefox()
# self._driver = webdriver.Chrome()
self._driver = self.get_new_marionette(self._headless)
return self._driver
async def parse_url(self, url: str, delay: int = 0) -> BeautifulSoup:
"""The `delay` parameter wait for the page to load/execute the scripts
in the marionette, some websites require that otherwise the JS don't
have the time to populate divs/lists.
"""
if url != self.driver.current_url:
self.driver.get(url)
return BeautifulSoup(self.driver.page_source, 'lxml')
async def parse_cloudflare_url(self, url: str, delay: int = 0) -> BeautifulSoup:
self.driver.get(url)
for index in range(20):
await asyncio.sleep(delay)
page = BeautifulSoup(self.driver.page_source, 'lxml')
# print(f'{index:02}: {self.driver.current_url}')
challenge_form = page.find('form', {'class': 'challenge-form'})
if not challenge_form:
return page
await asyncio.sleep(8)
async def post_cloudflare_challenge(self, page: BeautifulSoup) -> None:
challenge_form = page.find('form', {'class': 'challenge-form'})
challenge_link = challenge_form['action']
challenge_inputs = challenge_form.find_all('input')
payload = dict({
field['name']: field['value'] for field in challenge_inputs if field.get('value', None)
})
cookies = self.driver.get_cookies()
print('POST', challenge_link, payload, cookies)
class LocalStorage:
"""Allow to access to the local storage of the marionette from python.
"""
def __init__(self, driver: uc.Chrome) :
self.driver = driver
def __len__(self):
return self.driver.execute_script("return window.localStorage.length;")
def items(self) :
return self.driver.execute_script( \
"var ls = window.localStorage, items = {}; " \
"for (var i = 0, k; i < ls.length; ++i) " \
" items[k = ls.key(i)] = ls.getItem(k); " \
"return items; ")
def keys(self) :
return self.driver.execute_script( \
"var ls = window.localStorage, keys = []; " \
"for (var i = 0; i < ls.length; ++i) " \
" keys[i] = ls.key(i); " \
"return keys; ")
def get(self, key):
return self.driver.execute_script("return window.localStorage.getItem(arguments[0]);", key)
def set(self, key, value):
self.driver.execute_script("window.localStorage.setItem(arguments[0], arguments[1]);", key, value)
def has(self, key):
return key in self.keys()
def remove(self, key):
self.driver.execute_script("window.localStorage.removeItem(arguments[0]);", key)
class Chapter(PrivatesAttrsMixin, EmbeddedDocument):
"""
Describe ONE chapter of a webtoon.
Things to override:
properties:
- url
functions:
- get_pages_urls (return the list of urls)
- nexts (return the list of next chapters after the curent instance)
"""
name: str
episode: Optional[int]
_parent: Optional["WebToonPacked"]
@validator('episode', pre=True)
def validate_episode(cls, value: Optional[Union[int, str]]) -> Optional[int]:
if isinstance(value, str):
rule = re.compile(r'^[a-z\-]+(\d+)')
match = rule.match(value)
if match:
chapter = match.groups()[0]
return int(chapter)
return value
@validator('name', pre=True)
def validate_name(cls, value: str) -> str:
try:
return value \
.replace('"', "") \
.replace('/', '') \
.replace('*','') \
.replace('&', '&') \
.replace(''', '\'')
except Exception:
return value
def __str__(self) -> str:
return self.name
@property
def cbz_path(self) -> str:
return os.path.join(self._parent.path, self.name.strip() + '.cbz')
def exists(self) -> bool:
return os.path.exists(self.cbz_path)
async def pull(self, pool_size: int = 3) -> bool:
"""
download the files related to this chapter.
return True if the image is present on disk
False otherwise
Note that presence on disk can be True in multiples cases:
- it was here before
- it was downloaded successfully
"""
if self.exists():
return True
self._start_pull()
pair_list = list([(filename, str(url)) async for filename, url in self])
if not pair_list:
self._no_content()
return False
pool = AioPool(pool_size)
cbz = InMemoryZipFile(self.cbz_path)
async with self._parent.get_client() as client:
async def download_coroutine(pair: Tuple[str, str]) -> bool:
"""return True if the file has been downloaded, False otherwise
may raise errors that will be present in results
"""
filename, url = pair
# Download the page
response = await client.get(url, ssl=self._parent.ssl_context)
response.raise_for_status()
# Save the page content to the cbz file
page_content: bytes = await response.read()
if not await self.is_valid_page_content(page_content):
return False
cbz.write(filename, page_content)
self._progress()
return True
result = await pool.map(download_coroutine, pair_list)
raise_on_any_error_from_pool(result)
if not any(result):
self.log('Empty, removed')
return False
cbz.save()
self.log('\n', end='')
return True
async def is_valid_page_content(self, page_content: bytes) -> bool:
"""return True if the page content seems to be a valid image
False otherwise
"""
try:
image = Image.open(BytesIO(page_content))
return True
except UnidentifiedImageError:
return False
async def get_pages_urls(self) -> List[HttpUrl]:
raise NotImplementedError
async def nexts(self) -> List["Chapter"]:
# override this to implement the next pull feature
get_chapters_func = getattr(self._parent, 'get_chapters_from_website')
if not get_chapters_func:
return []
chapters = await get_chapters_func()
return list(filter(lambda chapter: chapter > self, chapters))
async def __aiter__(self) -> AsyncGenerator[Tuple[str, HttpUrl], None]:
pages = await self.get_pages_urls()
for index, url in enumerate(pages): | filename = f'{index:03}.jpg'
yield filename, url
def _progress(self) -> None:
self.log('.', end='')
def _start_pull(self) -> None:
self.log(f'{self.name}: ', end='')
def _no_content(self) -> None:
self.log('No content')
def log(self, *args, **kwargs) -> None:
print(*args, **kwargs)
sys.stdout.flush()
| random_line_split |
|
newtoon.py | :
@wraps(func)
async def wrapper(*args, **kwargs) -> Optional[Any]:
try:
return await func(*args, **kwargs)
except error:
if message:
print(message)
return None
return wrapper
return decorator
class InMemoryZipFile:
def __init__(self, filename: str):
self.io = BytesIO()
self.cbz = zipfile.ZipFile(self.io, 'w', zipfile.ZIP_DEFLATED)
self.filename = filename
def __str__(self) -> str:
return self.filename
def exists(self) -> bool:
return os.path.exists(self.filename)
def write(self, filename: str, data: bytes) -> None:
self.cbz.writestr(filename, data)
def close(self):
self.cbz.close()
def save(self) -> None:
self.close()
self.io.seek(0)
with open(self.filename, 'wb') as fp:
fp.write(self.io.read())
class SeleniumMixin:
_driver: Optional[Union[webdriver.Firefox, webdriver.Chrome]] = None
_headless: bool = False
@classmethod
def get_new_marionette(cls, headless: bool = False) -> uc.Chrome:
print('Requesting a new marionette')
options = uc.ChromeOptions()
if headless:
options.headless = True
options.add_argument('--headless')
options.add_argument('--disable-gpu')
options.add_argument('--no-first-run --no-service-autorun --password-store=basic')
extenssions_paths = [
'/usr/lib/ublock-origin'
]
for extenssion in extenssions_paths:
options.add_argument(f'--load-extension={extenssion}')
driver = uc.Chrome(options=options)
print('Got new marionette.')
return driver
@property
def driver(self) -> Union[webdriver.Firefox, webdriver.Chrome]:
if not self._driver:
# self._driver = webdriver.Firefox()
# self._driver = webdriver.Chrome()
self._driver = self.get_new_marionette(self._headless)
return self._driver
async def parse_url(self, url: str, delay: int = 0) -> BeautifulSoup:
"""The `delay` parameter wait for the page to load/execute the scripts
in the marionette, some websites require that otherwise the JS don't
have the time to populate divs/lists.
"""
if url != self.driver.current_url:
self.driver.get(url)
return BeautifulSoup(self.driver.page_source, 'lxml')
async def parse_cloudflare_url(self, url: str, delay: int = 0) -> BeautifulSoup:
self.driver.get(url)
for index in range(20):
await asyncio.sleep(delay)
page = BeautifulSoup(self.driver.page_source, 'lxml')
# print(f'{index:02}: {self.driver.current_url}')
challenge_form = page.find('form', {'class': 'challenge-form'})
if not challenge_form:
return page
await asyncio.sleep(8)
async def post_cloudflare_challenge(self, page: BeautifulSoup) -> None:
challenge_form = page.find('form', {'class': 'challenge-form'})
challenge_link = challenge_form['action']
challenge_inputs = challenge_form.find_all('input')
payload = dict({
field['name']: field['value'] for field in challenge_inputs if field.get('value', None)
})
cookies = self.driver.get_cookies()
print('POST', challenge_link, payload, cookies)
class LocalStorage:
"""Allow to access to the local storage of the marionette from python.
"""
def __init__(self, driver: uc.Chrome) :
self.driver = driver
def __len__(self):
return self.driver.execute_script("return window.localStorage.length;")
def items(self) :
return self.driver.execute_script( \
"var ls = window.localStorage, items = {}; " \
"for (var i = 0, k; i < ls.length; ++i) " \
" items[k = ls.key(i)] = ls.getItem(k); " \
"return items; ")
def keys(self) :
return self.driver.execute_script( \
"var ls = window.localStorage, keys = []; " \
"for (var i = 0; i < ls.length; ++i) " \
" keys[i] = ls.key(i); " \
"return keys; ")
def get(self, key):
return self.driver.execute_script("return window.localStorage.getItem(arguments[0]);", key)
def set(self, key, value):
self.driver.execute_script("window.localStorage.setItem(arguments[0], arguments[1]);", key, value)
def has(self, key):
return key in self.keys()
def remove(self, key):
self.driver.execute_script("window.localStorage.removeItem(arguments[0]);", key)
class Chapter(PrivatesAttrsMixin, EmbeddedDocument):
"""
Describe ONE chapter of a webtoon.
Things to override:
properties:
- url
functions:
- get_pages_urls (return the list of urls)
- nexts (return the list of next chapters after the curent instance)
"""
name: str
episode: Optional[int]
_parent: Optional["WebToonPacked"]
@validator('episode', pre=True)
def validate_episode(cls, value: Optional[Union[int, str]]) -> Optional[int]:
if isinstance(value, str):
rule = re.compile(r'^[a-z\-]+(\d+)')
match = rule.match(value)
if match:
chapter = match.groups()[0]
return int(chapter)
return value
@validator('name', pre=True)
def validate_name(cls, value: str) -> str:
try:
return value \
.replace('"', "") \
.replace('/', '') \
.replace('*','') \
.replace('&', '&') \
.replace(''', '\'')
except Exception:
return value
def __str__(self) -> str:
return self.name
@property
def cbz_path(self) -> str:
return os.path.join(self._parent.path, self.name.strip() + '.cbz')
def exists(self) -> bool:
return os.path.exists(self.cbz_path)
async def pull(self, pool_size: int = 3) -> bool:
"""
download the files related to this chapter.
return True if the image is present on disk
False otherwise
Note that presence on disk can be True in multiples cases:
- it was here before
- it was downloaded successfully
"""
if self.exists():
return True
self._start_pull()
pair_list = list([(filename, str(url)) async for filename, url in self])
if not pair_list:
self._no_content()
return False
pool = AioPool(pool_size)
cbz = InMemoryZipFile(self.cbz_path)
async with self._parent.get_client() as client:
async def download_coroutine(pair: Tuple[str, str]) -> bool:
"""return True if the file has been downloaded, False otherwise
may raise errors that will be present in results
"""
filename, url = pair
# Download the page
response = await client.get(url, ssl=self._parent.ssl_context)
response.raise_for_status()
# Save the page content to the cbz file
page_content: bytes = await response.read()
if not await self.is_valid_page_content(page_content):
return False
cbz.write(filename, page_content)
self._progress()
return True
result = await pool.map(download_coroutine, pair_list)
raise_on_any_error_from_pool(result)
if not any(result):
self.log('Empty, removed')
return False
cbz.save()
self.log('\n', end='')
return True
async def is_valid_page_content(self, page_content: bytes) -> bool:
"""return True if the page content seems to be a valid image
False otherwise
"""
try:
image = Image.open(BytesIO(page_content))
return True
except UnidentifiedImageError:
return False
async def get_pages_urls(self) -> List[HttpUrl]:
raise NotImplementedError
async def nexts(self) -> List["Chapter"]:
# override this to implement the next pull feature
get_chapters_func = getattr(self._parent, 'get_chapters_from_website')
if not get_chapters_func:
return []
chapters = await get_chapters_func()
return list(filter(lambda chapter: chapter > self, chapters))
async def __aiter__(self) -> AsyncGenerator[Tuple[str, HttpUrl], None]:
pages = await self.get_pages_urls()
for index, url in enumerate(pages):
filename = f'{index:03}.jpg'
yield filename, url
def _progress(self) -> None:
self.log('.', end='')
def _start_pull(self) -> None:
self.log(f'{self.name}: ', end='')
def _no_content(self) -> None:
self.log('No content')
def log(self, *args, **kwargs) -> None:
print(*args, **kwargs)
sys.stdout.flush()
def __gt__(self, other: "Chapter") -> bool:
| return self.episode > other.episode | identifier_body |
|
newtoon.py | : Type[Exception], message: Optional[str] = None):
def decorator(func: Callable) -> Coroutine:
@wraps(func)
async def wrapper(*args, **kwargs) -> Optional[Any]:
try:
return await func(*args, **kwargs)
except error:
if message:
print(message)
return None
return wrapper
return decorator
class InMemoryZipFile:
def __init__(self, filename: str):
self.io = BytesIO()
self.cbz = zipfile.ZipFile(self.io, 'w', zipfile.ZIP_DEFLATED)
self.filename = filename
def __str__(self) -> str:
return self.filename
def exists(self) -> bool:
return os.path.exists(self.filename)
def write(self, filename: str, data: bytes) -> None:
self.cbz.writestr(filename, data)
def close(self):
self.cbz.close()
def save(self) -> None:
self.close()
self.io.seek(0)
with open(self.filename, 'wb') as fp:
fp.write(self.io.read())
class SeleniumMixin:
_driver: Optional[Union[webdriver.Firefox, webdriver.Chrome]] = None
_headless: bool = False
@classmethod
def get_new_marionette(cls, headless: bool = False) -> uc.Chrome:
print('Requesting a new marionette')
options = uc.ChromeOptions()
if headless:
|
options.add_argument('--disable-gpu')
options.add_argument('--no-first-run --no-service-autorun --password-store=basic')
extenssions_paths = [
'/usr/lib/ublock-origin'
]
for extenssion in extenssions_paths:
options.add_argument(f'--load-extension={extenssion}')
driver = uc.Chrome(options=options)
print('Got new marionette.')
return driver
@property
def driver(self) -> Union[webdriver.Firefox, webdriver.Chrome]:
if not self._driver:
# self._driver = webdriver.Firefox()
# self._driver = webdriver.Chrome()
self._driver = self.get_new_marionette(self._headless)
return self._driver
async def parse_url(self, url: str, delay: int = 0) -> BeautifulSoup:
"""The `delay` parameter wait for the page to load/execute the scripts
in the marionette, some websites require that otherwise the JS don't
have the time to populate divs/lists.
"""
if url != self.driver.current_url:
self.driver.get(url)
return BeautifulSoup(self.driver.page_source, 'lxml')
async def parse_cloudflare_url(self, url: str, delay: int = 0) -> BeautifulSoup:
self.driver.get(url)
for index in range(20):
await asyncio.sleep(delay)
page = BeautifulSoup(self.driver.page_source, 'lxml')
# print(f'{index:02}: {self.driver.current_url}')
challenge_form = page.find('form', {'class': 'challenge-form'})
if not challenge_form:
return page
await asyncio.sleep(8)
async def post_cloudflare_challenge(self, page: BeautifulSoup) -> None:
challenge_form = page.find('form', {'class': 'challenge-form'})
challenge_link = challenge_form['action']
challenge_inputs = challenge_form.find_all('input')
payload = dict({
field['name']: field['value'] for field in challenge_inputs if field.get('value', None)
})
cookies = self.driver.get_cookies()
print('POST', challenge_link, payload, cookies)
class LocalStorage:
"""Allow to access to the local storage of the marionette from python.
"""
def __init__(self, driver: uc.Chrome) :
self.driver = driver
def __len__(self):
return self.driver.execute_script("return window.localStorage.length;")
def items(self) :
return self.driver.execute_script( \
"var ls = window.localStorage, items = {}; " \
"for (var i = 0, k; i < ls.length; ++i) " \
" items[k = ls.key(i)] = ls.getItem(k); " \
"return items; ")
def keys(self) :
return self.driver.execute_script( \
"var ls = window.localStorage, keys = []; " \
"for (var i = 0; i < ls.length; ++i) " \
" keys[i] = ls.key(i); " \
"return keys; ")
def get(self, key):
return self.driver.execute_script("return window.localStorage.getItem(arguments[0]);", key)
def set(self, key, value):
self.driver.execute_script("window.localStorage.setItem(arguments[0], arguments[1]);", key, value)
def has(self, key):
return key in self.keys()
def remove(self, key):
self.driver.execute_script("window.localStorage.removeItem(arguments[0]);", key)
class Chapter(PrivatesAttrsMixin, EmbeddedDocument):
"""
Describe ONE chapter of a webtoon.
Things to override:
properties:
- url
functions:
- get_pages_urls (return the list of urls)
- nexts (return the list of next chapters after the curent instance)
"""
name: str
episode: Optional[int]
_parent: Optional["WebToonPacked"]
@validator('episode', pre=True)
def validate_episode(cls, value: Optional[Union[int, str]]) -> Optional[int]:
if isinstance(value, str):
rule = re.compile(r'^[a-z\-]+(\d+)')
match = rule.match(value)
if match:
chapter = match.groups()[0]
return int(chapter)
return value
@validator('name', pre=True)
def validate_name(cls, value: str) -> str:
try:
return value \
.replace('"', "") \
.replace('/', '') \
.replace('*','') \
.replace('&', '&') \
.replace(''', '\'')
except Exception:
return value
def __str__(self) -> str:
return self.name
@property
def cbz_path(self) -> str:
return os.path.join(self._parent.path, self.name.strip() + '.cbz')
def exists(self) -> bool:
return os.path.exists(self.cbz_path)
async def pull(self, pool_size: int = 3) -> bool:
"""
download the files related to this chapter.
return True if the image is present on disk
False otherwise
Note that presence on disk can be True in multiples cases:
- it was here before
- it was downloaded successfully
"""
if self.exists():
return True
self._start_pull()
pair_list = list([(filename, str(url)) async for filename, url in self])
if not pair_list:
self._no_content()
return False
pool = AioPool(pool_size)
cbz = InMemoryZipFile(self.cbz_path)
async with self._parent.get_client() as client:
async def download_coroutine(pair: Tuple[str, str]) -> bool:
"""return True if the file has been downloaded, False otherwise
may raise errors that will be present in results
"""
filename, url = pair
# Download the page
response = await client.get(url, ssl=self._parent.ssl_context)
response.raise_for_status()
# Save the page content to the cbz file
page_content: bytes = await response.read()
if not await self.is_valid_page_content(page_content):
return False
cbz.write(filename, page_content)
self._progress()
return True
result = await pool.map(download_coroutine, pair_list)
raise_on_any_error_from_pool(result)
if not any(result):
self.log('Empty, removed')
return False
cbz.save()
self.log('\n', end='')
return True
async def is_valid_page_content(self, page_content: bytes) -> bool:
"""return True if the page content seems to be a valid image
False otherwise
"""
try:
image = Image.open(BytesIO(page_content))
return True
except UnidentifiedImageError:
return False
async def get_pages_urls(self) -> List[HttpUrl]:
raise NotImplementedError
async def nexts(self) -> List["Chapter"]:
# override this to implement the next pull feature
get_chapters_func = getattr(self._parent, 'get_chapters_from_website')
if not get_chapters_func:
return []
chapters = await get_chapters_func()
return list(filter(lambda chapter: chapter > self, chapters))
async def __aiter__(self) -> AsyncGenerator[Tuple[str, HttpUrl], None]:
pages = await self.get_pages_urls()
for index, url in enumerate(pages):
filename = f'{index:03}.jpg'
yield filename, url
def _progress(self) -> None:
self.log('.', end='')
def _start_pull(self) -> None:
self.log(f'{self.name}: ', end='')
def _no_content(self) -> None:
self.log('No content')
def log(self, *args, **kwargs) -> None:
print(*args, **kwargs)
sys.stdout.flush()
| options.headless = True
options.add_argument('--headless') | conditional_block |
newtoon.py | decorator
class InMemoryZipFile:
def __init__(self, filename: str):
self.io = BytesIO()
self.cbz = zipfile.ZipFile(self.io, 'w', zipfile.ZIP_DEFLATED)
self.filename = filename
def __str__(self) -> str:
return self.filename
def exists(self) -> bool:
return os.path.exists(self.filename)
def write(self, filename: str, data: bytes) -> None:
self.cbz.writestr(filename, data)
def close(self):
self.cbz.close()
def save(self) -> None:
self.close()
self.io.seek(0)
with open(self.filename, 'wb') as fp:
fp.write(self.io.read())
class SeleniumMixin:
_driver: Optional[Union[webdriver.Firefox, webdriver.Chrome]] = None
_headless: bool = False
@classmethod
def get_new_marionette(cls, headless: bool = False) -> uc.Chrome:
print('Requesting a new marionette')
options = uc.ChromeOptions()
if headless:
options.headless = True
options.add_argument('--headless')
options.add_argument('--disable-gpu')
options.add_argument('--no-first-run --no-service-autorun --password-store=basic')
extenssions_paths = [
'/usr/lib/ublock-origin'
]
for extenssion in extenssions_paths:
options.add_argument(f'--load-extension={extenssion}')
driver = uc.Chrome(options=options)
print('Got new marionette.')
return driver
@property
def driver(self) -> Union[webdriver.Firefox, webdriver.Chrome]:
if not self._driver:
# self._driver = webdriver.Firefox()
# self._driver = webdriver.Chrome()
self._driver = self.get_new_marionette(self._headless)
return self._driver
async def parse_url(self, url: str, delay: int = 0) -> BeautifulSoup:
"""The `delay` parameter wait for the page to load/execute the scripts
in the marionette, some websites require that otherwise the JS don't
have the time to populate divs/lists.
"""
if url != self.driver.current_url:
self.driver.get(url)
return BeautifulSoup(self.driver.page_source, 'lxml')
async def parse_cloudflare_url(self, url: str, delay: int = 0) -> BeautifulSoup:
self.driver.get(url)
for index in range(20):
await asyncio.sleep(delay)
page = BeautifulSoup(self.driver.page_source, 'lxml')
# print(f'{index:02}: {self.driver.current_url}')
challenge_form = page.find('form', {'class': 'challenge-form'})
if not challenge_form:
return page
await asyncio.sleep(8)
async def post_cloudflare_challenge(self, page: BeautifulSoup) -> None:
challenge_form = page.find('form', {'class': 'challenge-form'})
challenge_link = challenge_form['action']
challenge_inputs = challenge_form.find_all('input')
payload = dict({
field['name']: field['value'] for field in challenge_inputs if field.get('value', None)
})
cookies = self.driver.get_cookies()
print('POST', challenge_link, payload, cookies)
class LocalStorage:
"""Allow to access to the local storage of the marionette from python.
"""
def __init__(self, driver: uc.Chrome) :
self.driver = driver
def __len__(self):
return self.driver.execute_script("return window.localStorage.length;")
def items(self) :
return self.driver.execute_script( \
"var ls = window.localStorage, items = {}; " \
"for (var i = 0, k; i < ls.length; ++i) " \
" items[k = ls.key(i)] = ls.getItem(k); " \
"return items; ")
def keys(self) :
return self.driver.execute_script( \
"var ls = window.localStorage, keys = []; " \
"for (var i = 0; i < ls.length; ++i) " \
" keys[i] = ls.key(i); " \
"return keys; ")
def get(self, key):
return self.driver.execute_script("return window.localStorage.getItem(arguments[0]);", key)
def set(self, key, value):
self.driver.execute_script("window.localStorage.setItem(arguments[0], arguments[1]);", key, value)
def has(self, key):
return key in self.keys()
def remove(self, key):
self.driver.execute_script("window.localStorage.removeItem(arguments[0]);", key)
class Chapter(PrivatesAttrsMixin, EmbeddedDocument):
"""
Describe ONE chapter of a webtoon.
Things to override:
properties:
- url
functions:
- get_pages_urls (return the list of urls)
- nexts (return the list of next chapters after the curent instance)
"""
name: str
episode: Optional[int]
_parent: Optional["WebToonPacked"]
@validator('episode', pre=True)
def validate_episode(cls, value: Optional[Union[int, str]]) -> Optional[int]:
if isinstance(value, str):
rule = re.compile(r'^[a-z\-]+(\d+)')
match = rule.match(value)
if match:
chapter = match.groups()[0]
return int(chapter)
return value
@validator('name', pre=True)
def validate_name(cls, value: str) -> str:
try:
return value \
.replace('"', "") \
.replace('/', '') \
.replace('*','') \
.replace('&', '&') \
.replace(''', '\'')
except Exception:
return value
def __str__(self) -> str:
return self.name
@property
def cbz_path(self) -> str:
return os.path.join(self._parent.path, self.name.strip() + '.cbz')
def exists(self) -> bool:
return os.path.exists(self.cbz_path)
async def pull(self, pool_size: int = 3) -> bool:
"""
download the files related to this chapter.
return True if the image is present on disk
False otherwise
Note that presence on disk can be True in multiples cases:
- it was here before
- it was downloaded successfully
"""
if self.exists():
return True
self._start_pull()
pair_list = list([(filename, str(url)) async for filename, url in self])
if not pair_list:
self._no_content()
return False
pool = AioPool(pool_size)
cbz = InMemoryZipFile(self.cbz_path)
async with self._parent.get_client() as client:
async def download_coroutine(pair: Tuple[str, str]) -> bool:
"""return True if the file has been downloaded, False otherwise
may raise errors that will be present in results
"""
filename, url = pair
# Download the page
response = await client.get(url, ssl=self._parent.ssl_context)
response.raise_for_status()
# Save the page content to the cbz file
page_content: bytes = await response.read()
if not await self.is_valid_page_content(page_content):
return False
cbz.write(filename, page_content)
self._progress()
return True
result = await pool.map(download_coroutine, pair_list)
raise_on_any_error_from_pool(result)
if not any(result):
self.log('Empty, removed')
return False
cbz.save()
self.log('\n', end='')
return True
async def is_valid_page_content(self, page_content: bytes) -> bool:
"""return True if the page content seems to be a valid image
False otherwise
"""
try:
image = Image.open(BytesIO(page_content))
return True
except UnidentifiedImageError:
return False
async def get_pages_urls(self) -> List[HttpUrl]:
raise NotImplementedError
async def nexts(self) -> List["Chapter"]:
# override this to implement the next pull feature
get_chapters_func = getattr(self._parent, 'get_chapters_from_website')
if not get_chapters_func:
return []
chapters = await get_chapters_func()
return list(filter(lambda chapter: chapter > self, chapters))
async def __aiter__(self) -> AsyncGenerator[Tuple[str, HttpUrl], None]:
pages = await self.get_pages_urls()
for index, url in enumerate(pages):
filename = f'{index:03}.jpg'
yield filename, url
def _progress(self) -> None:
self.log('.', end='')
def _start_pull(self) -> None:
self.log(f'{self.name}: ', end='')
def _no_content(self) -> None:
self.log('No content')
def log(self, *args, **kwargs) -> None:
print(*args, **kwargs)
sys.stdout.flush()
def __gt__(self, other: "Chapter") -> bool:
return self.episode > other.episode
def __lt__(self, other: "Chapter") -> bool:
return self.episode < other.episode
def __eq__(self, other: "Chapter") -> bool:
return self.episode == other.episode
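# Minimal subclass sketch (illustrative: the URL scheme and empty page list are
# hypothetical; per the class docstring only `url` and `get_pages_urls`, plus
# optionally `nexts`, need overriding):
class ExampleChapter(Chapter):
    @property
    def url(self) -> str:
        return f'https://example.test/read/{self.episode}'

    async def get_pages_urls(self) -> List[HttpUrl]:
        # A real site adapter would fetch and parse self.url here.
        return []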
# (the source file continues with a ToonManager class)
overlay.js | go2.pl' : 'o2pl',
'tlen.pl' : 'o2pl',
'o2.pl' : 'o2pl',
'google.com' : 'google',
'twitter.com' : 'twitter',
'facebook.com' : 'facebook',
'mailgun.us' : 'rackspace',
'mailgun.org' : 'rackspace',
'emailsrvr.com' : 'rackspace',
'rackspace.com' : 'rackspace',
'dreamhost.com' : 'dreamhost',
'linode.com' : 'linode',
'messagingengine.com' : 'opera',
'fastmail.fm' : 'opera',
'fastmail.net' : 'opera',
'onet.pl' : 'onet',
'sendgrid.com' : 'sendgrid',
'sendgrid.net' : 'sendgrid',
'wp.pl' : 'wp',
'hostgator.com' : 'hostgator',
'hostgator.net' : 'hostgator',
'interia.pl' : 'interia',
'yahoo.com' : 'yahoo',
'hotmail.com' : 'hotmail',
'outlook.com' : 'hotmail',
'live.com' : 'hotmail',
'qq.com' : 'qq',
'gadu-gadu.pl' : 'gadu',
'amazonses.com' : 'amazon',
'amazon.com' : 'amazon',
'home.pl' : 'home',
'home.net.pl' : 'home',
};
var found = new Array();
receivedHeaders.forEach(function(hdr) {
var domainRegex = /(?:\.|^)([a-z0-9\-]+\.[a-z0-9\-]+)$/g;
var thirdLevelDomain = /^(net|com|org|biz)\.[a-z0-9]+$/g;
var thirdLevelDomainRegex = /(?:\.|^)([a-z0-9\-]+\.[a-z0-9\-]+\.[a-z0-9\-]+)$/g;
var match = domainRegex.exec(hdr.from.toLowerCase());
if(match)
{
var domain = match[1];
// special case - .net.pl etc
if(thirdLevelDomain.test(domain)) {
match = thirdLevelDomainRegex.exec(hdr.from.toLowerCase());
if(match) {
domain = match[1];
}
}
if(known[domain] && found.indexOf(known[domain]) == -1) {
found.push(known[domain]);
}
}
});
return found;
},
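/* Illustration: a message relayed through smtp.google.com and mx.wp.pl would
   yield ['google', 'wp'] here (hypothetical header values). */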
/* Return number of insecure hosts in the path */
paranoiaAreReceivedHeadersInsecure: function(receivedHeaders) {
var insecure = 0;
var unencryptedLocal = 0;
var encrypted = 0;
receivedHeaders.forEach(function(header) {
// Application.console.log(header.from + " - " + header.secure);
if(!header.secure && !header.local) insecure++;
if(!header.secure && header.local) unencryptedLocal++;
if(header.secure) encrypted++;
});
return {
'insecure': insecure,
'unencryptedLocal': unencryptedLocal,
'encrypted': encrypted
};
},
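/* Sketch of how these counters can drive the icon choice (illustrative wiring,
   not taken from the original file):
   var c = tbParanoia.paranoiaAreReceivedHeadersInsecure(headers);
   if(c.insecure > 1) tbParanoia.paranoiaSetTragicIcon();
   else if(c.insecure == 1) tbParanoia.paranoiaSetBadIcon();
   else if(c.unencryptedLocal > 0) tbParanoia.paranoiaSetGoodIcon();
   else tbParanoia.paranoiaSetPerfectIcon();
*/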
/* Create a popup menu with all 'Received:' headers */
paranoiaCreateReceivedPopup: function(receivedHeaders) {
var popup = document.createElement('menupopup');
popup.setAttribute('id', 'paranoiaConnectionList');
receivedHeaders.forEach(function(hdr) {
var item = document.createElement('menuitem');
item.setAttribute('label', hdr.toString());
popup.appendChild(item);
});
return popup;
},
/* Remove popup from DOM tree, if found */
paranoiaRemoveReceivedPopup: function() {
var elem = document.getElementById('paranoiaConnectionList');
if(elem) elem.parentNode.removeChild(elem);
},
/* Return XULElement with icon - create one if necessary */
paranoiaGetHdrIconDOM: function() {
var id = 'paranoiaHdrIcon';
if(document.getElementById(id))
{
return document.getElementById(id);
}
var parentBox = document.getElementById('dateValueBox');
var previousBox = document.getElementById('smimeBox');
if(!parentBox || !previousBox) {
Application.console.log('Chrome element not found');
}
var elem = document.createElement('image');
elem.setAttribute('id', id);
elem.onclick = function() {
document.getElementById('paranoiaConnectionList').openPopup(this, 'after_start', 0, 0, false, false);
}
parentBox.insertBefore(elem, previousBox);
return elem;
},
paranoiaSetPerfectIcon: function() {
var icon = tbParanoia.paranoiaGetHdrIconDOM();
icon.setAttribute('style', 'list-style-image: url("chrome://demo/skin/perfect.png")');
icon.setAttribute('tooltiptext', 'Perfect - no known email providers and encryption between all hops');
return icon;
},
paranoiaSetGoodIcon: function() {
var icon = tbParanoia.paranoiaGetHdrIconDOM();
icon.setAttribute('style', 'list-style-image: url("chrome://demo/skin/good.png")');
icon.setAttribute('tooltiptext', 'Good - Email passed known providers or was unencrypted only on a local connection');
return icon;
},
paranoiaSetBadIcon: function() {
var icon = tbParanoia.paranoiaGetHdrIconDOM();
icon.setAttribute('style', 'list-style-image: url("chrome://demo/skin/bad.png")');
icon.setAttribute('tooltiptext', '1 non-local connection on the way was unencrypted');
return icon;
},
paranoiaSetTragicIcon: function() {
var icon = tbParanoia.paranoiaGetHdrIconDOM();
icon.setAttribute('style', 'list-style-image: url("chrome://demo/skin/tragic.png")');
icon.setAttribute('tooltiptext', 'More than 1 connection on the way was unencrypted');
return icon;
},
paranoiaAddProviderIcon: function(providerName, parentBox) {
var previousBox = tbParanoia.paranoiaGetHdrIconDOM();
var elem = document.createElement('image');
elem.setAttribute('class', 'paranoiaProvider');
elem.setAttribute('style', 'list-style-image: url("chrome://demo/skin/providers/' + providerName + '.png")');
elem.setAttribute('tooltiptext', tbParanoia.paranoiaGetProviderDisplayName(providerName));
parentBox.appendChild(elem);
},
paranoiaAddProviderIcons: function(providers)
{
var oldIcons = document.getElementsByClassName('paranoiaProviderVbox');
var i, len = oldIcons.length;
var vbox;
for(i = 0; i < len; i++) {
var elem = oldIcons[i];
elem.parentNode.removeChild(elem);
}
for(i = 0; i < providers.length; i++) {
var item = providers[i];
if(i % 2 == 0) {
if(vbox) document.getElementById('dateValueBox').insertBefore(vbox, tbParanoia.paranoiaGetHdrIconDOM());
vbox = document.createElement('vbox');
vbox.setAttribute('class', 'paranoiaProviderVbox');
}
tbParanoia.paranoiaAddProviderIcon(item, vbox);
}
if(vbox) document.getElementById('dateValueBox').insertBefore(vbox, tbParanoia.paranoiaGetHdrIconDOM());
},
/* Return true if host is on a local network */
paranoiaIsHostLocal: function(hostname) {
if(hostname == 'localhost') return true;
if(hostname == '[127.0.0.1]') return true;
if(hostname == 'Internal') return true;
if(hostname == 'www-data') return true;
if(/\.internal$/.test(hostname)) return true;
if(/(^\[10\.)|(^\[172\.1[6-9]\.)|(^\[172\.2[0-9]\.)|(^\[172\.3[0-1]\.)|(^\[192\.168\.)/g.test(hostname)) return true;
return false;
},
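/* Examples: 'localhost', '[127.0.0.1]' and '[192.168.1.5]' count as local,
   while 'mx1.example.com' does not (illustrative hostnames). */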
/* mx123.mail.corpo.com -> corpo.com */
paranoiaGetDomainName: function(hostname) {
if(hostname.indexOf('.') < 0) {
return hostname;
}
try {
return hostname.match(/[a-z0-9][a-z0-9\-]+\.[a-z]+$/)[0];
} catch(e) {
return hostname;
}
},
/* Checks if given nsMsgFolder is a RSS/Atom feed folder */
paranoiaIsFeedFolder: function(folder) {
return /^mailbox:\/\/[^@\/]+@Feeds/.exec(folder.URI);
},
init: function() {
// http://stackoverflow.com/questions/5089405/thunderbird-extension-add-field-to-messagepane-how-to-deal-with-windows-instan
|
cassandraMetadataPersistence.go | ?, ` +
`emit_metric: ?, ` +
`archival_bucket: ?, ` +
`archival_status: ?,` +
`history_archival_status: ?, ` +
`history_archival_uri: ?, ` +
`visibility_archival_status: ?, ` +
`visibility_archival_uri: ?, ` +
`bad_binaries: ?,` +
`bad_binaries_encoding: ?` +
`}`
templateDomainReplicationConfigType = `{` +
`active_cluster_name: ?, ` +
`clusters: ? ` +
`}`
templateCreateDomainQuery = `INSERT INTO domains (` +
`id, domain) ` +
`VALUES(?, {name: ?}) IF NOT EXISTS`
templateCreateDomainByNameQuery = `INSERT INTO domains_by_name (` +
`name, domain, config, replication_config, is_global_domain, config_version, failover_version) ` +
`VALUES(?, ` + templateDomainInfoType + `, ` + templateDomainConfigType + `, ` + templateDomainReplicationConfigType + `, ?, ?, ?) IF NOT EXISTS`
templateGetDomainQuery = `SELECT domain.name ` +
`FROM domains ` +
`WHERE id = ?`
templateGetDomainByNameQuery = `SELECT domain.id, domain.name, domain.status, domain.description, ` +
`domain.owner_email, domain.data, config.retention, config.emit_metric, ` +
`config.archival_bucket, config.archival_status, ` +
`config.history_archival_status, config.history_archival_uri, ` +
`config.visibility_archival_status, config.visibility_archival_uri, ` +
`config.bad_binaries, config.bad_binaries_encoding, ` +
`replication_config.active_cluster_name, replication_config.clusters, ` +
`is_global_domain, ` +
`config_version, ` +
`failover_version, ` +
`db_version ` +
`FROM domains_by_name ` +
`WHERE name = ?`
templateUpdateDomainByNameQuery = `UPDATE domains_by_name ` +
`SET domain = ` + templateDomainInfoType + `, ` +
`config = ` + templateDomainConfigType + `, ` +
`replication_config = ` + templateDomainReplicationConfigType + `, ` +
`config_version = ? ,` +
`failover_version = ? ,` +
`db_version = ? ` +
`WHERE name = ? ` +
`IF db_version = ? `
templateDeleteDomainQuery = `DELETE FROM domains ` +
`WHERE id = ?`
templateDeleteDomainByNameQuery = `DELETE FROM domains_by_name ` +
`WHERE name = ?`
)
type (
cassandraMetadataPersistence struct {
cassandraStore
currentClusterName string
}
)
// newMetadataPersistence is used to create an instance of the MetadataStore implementation
func newMetadataPersistence(cfg config.Cassandra, clusterName string, logger log.Logger) (p.MetadataStore,
error) {
cluster := NewCassandraCluster(cfg.Hosts, cfg.Port, cfg.User, cfg.Password, cfg.Datacenter)
cluster.Keyspace = cfg.Keyspace
cluster.ProtoVersion = cassandraProtoVersion
cluster.Consistency = gocql.LocalQuorum
cluster.SerialConsistency = gocql.LocalSerial
cluster.Timeout = defaultSessionTimeout
session, err := cluster.CreateSession()
if err != nil {
return nil, err
}
return &cassandraMetadataPersistence{
cassandraStore: cassandraStore{session: session, logger: logger},
currentClusterName: clusterName,
}, nil
}
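// Construction sketch (illustrative; assumes a populated Cassandra config and logger):
//   store, err := newMetadataPersistence(cfg, currentClusterName, logger)
//   if err != nil { return err }
//   defer store.Close()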
// Close releases the resources held by this object
func (m *cassandraMetadataPersistence) Close() {
if m.session != nil {
m.session.Close()
}
}
// Cassandra does not support conditional updates across multiple tables. For this reason we have to first insert into
// 'Domains' table and then do a conditional insert into domains_by_name table. If the conditional write fails we
// delete the orphaned entry from domains table. There is a chance delete entry could fail and we never delete the
// orphaned entry from the domains table. We might need a background job to delete those orphaned records.
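// In outline, the two-phase flow implemented below is:
//   1. INSERT INTO domains ... IF NOT EXISTS         (not applied -> uuid collision)
//   2. INSERT INTO domains_by_name ... IF NOT EXISTS
//      (not applied -> delete the orphan from domains, report "already exists")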
func (m *cassandraMetadataPersistence) CreateDomain(request *p.InternalCreateDomainRequest) (*p.CreateDomainResponse, error) {
query := m.session.Query(templateCreateDomainQuery, request.Info.ID, request.Info.Name)
applied, err := query.MapScanCAS(make(map[string]interface{}))
if err != nil {
return nil, &workflow.InternalServiceError{
Message: fmt.Sprintf("CreateDomain operation failed. Inserting into domains table. Error: %v", err),
}
}
if !applied {
return nil, &workflow.InternalServiceError{
Message: fmt.Sprintf("CreateDomain operation failed because of uuid collision."),
}
}
query = m.session.Query(templateCreateDomainByNameQuery,
request.Info.Name,
request.Info.ID,
request.Info.Name,
request.Info.Status,
request.Info.Description,
request.Info.OwnerEmail,
request.Info.Data,
request.Config.Retention,
request.Config.EmitMetric,
request.Config.ArchivalBucket,
request.Config.ArchivalStatus,
request.Config.HistoryArchivalStatus,
request.Config.HistoryArchivalURI,
request.Config.VisibilityArchivalStatus,
request.Config.VisibilityArchivalURI,
request.Config.BadBinaries.Data,
string(request.Config.BadBinaries.GetEncoding()),
request.ReplicationConfig.ActiveClusterName,
p.SerializeClusterConfigs(request.ReplicationConfig.Clusters),
request.IsGlobalDomain,
request.ConfigVersion,
request.FailoverVersion,
)
previous := make(map[string]interface{})
applied, err = query.MapScanCAS(previous)
if err != nil {
return nil, &workflow.InternalServiceError{
Message: fmt.Sprintf("CreateDomain operation failed. Inserting into domains_by_name table. Error: %v", err),
}
}
if !applied {
// Domain already exist. Delete orphan domain record before returning back to user
if errDelete := m.session.Query(templateDeleteDomainQuery, request.Info.ID).Exec(); errDelete != nil {
m.logger.Warn("Unable to delete orphan domain record", tag.Error(errDelete))
}
if domain, ok := previous["domain"].(map[string]interface{}); ok {
msg := fmt.Sprintf("Domain already exists. DomainId: %v", domain["id"])
return nil, &workflow.DomainAlreadyExistsError{
Message: msg,
}
}
return nil, &workflow.DomainAlreadyExistsError{
Message: fmt.Sprintf("CreateDomain operation failed because of conditional failure."),
}
}
return &p.CreateDomainResponse{ID: request.Info.ID}, nil
}
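// Caller sketch (illustrative request values, not from this file):
//   resp, err := store.CreateDomain(&p.InternalCreateDomainRequest{Info: info, Config: cfg})
//   if err == nil { logger.Info("created domain " + resp.ID) }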
func (m *cassandraMetadataPersistence) GetDomain(request *p.GetDomainRequest) (*p.InternalGetDomainResponse, error) {
var query *gocql.Query
var err error
info := &p.DomainInfo{}
config := &p.InternalDomainConfig{}
replicationConfig := &p.DomainReplicationConfig{}
var replicationClusters []map[string]interface{}
var dbVersion int64
var failoverVersion int64
var configVersion int64
var isGlobalDomain bool
if len(request.ID) > 0 && len(request.Name) > 0 {
return nil, &workflow.BadRequestError{
Message: "GetDomain operation failed. Both ID and Name specified in request.",
}
} else if len(request.ID) == 0 && len(request.Name) == 0 {
return nil, &workflow.BadRequestError{
Message: "GetDomain operation failed. Both ID and Name are empty.",
}
}
handleError := func(name, ID string, err error) error {
identity := name
if len(ID) > 0 {
identity = ID
}
if err == gocql.ErrNotFound {
return &workflow.EntityNotExistsError{
Message: fmt.Sprintf("Domain %s does not exist.", identity),
}
}
return &workflow.InternalServiceError{
Message: fmt.Sprintf("GetDomain operation failed. Error %v", err),
}
}
domainName := request.Name
if len(request.ID) > 0 {
query = m.session.Query(templateGetDomainQuery, request.ID)
err = query.Scan(&domainName)
if err != nil {
return nil, handleError(request.Name, request.ID, err)
}
}
var badBinariesData []byte
var badBinariesDataEncoding string
query = m.session.Query(templateGetDomainByNameQuery, domainName)
err = query.Scan(
&info.ID,
&info.Name,
&info.Status,
&info.Description,
&info.OwnerEmail,
&info.Data,
&config.Retention,
&config.EmitMetric,
&config.ArchivalBucket,
&config.ArchivalStatus,
&config.HistoryArchivalStatus,
&config.HistoryArchivalURI,
&config.VisibilityArchivalStatus,
&config.VisibilityArchivalURI,
&badBinariesData,
&badBinariesDataEncoding,
&replicationConfig.ActiveClusterName,
&replicationClusters,
&isGlobalDomain,
&configVersion,
&failoverVersion,
&dbVersion,
)
if err != nil {
return nil, handleError(request.Name, request.ID, err)
}
if info.Data == nil {
info.Data = map[string]string{}
}
config.BadBinaries = p.NewDataBlob(badBinariesData, common.EncodingType(badBinariesDataEncoding))
replicationConfig.ActiveClusterName = p.GetOrUseDefaultActiveCluster(m.currentClusterName, replicationConfig.ActiveClusterName)
replicationConfig.Clusters = p.DeserializeClusterConfigs(replicationClusters)
replicationConfig.Clusters = p.GetOrUseDefaultClusters(m.currentClusterName, replicationConfig.Clusters)
return &p.InternalGetDomainResponse{
Info: info,
Config: config,
ReplicationConfig: replicationConfig,
IsGlobalDomain: isGlobalDomain,
ConfigVersion: configVersion,
FailoverVersion: failoverVersion,
NotificationVersion: dbVersion,
TableVersion: p.DomainTableVersionV1,
}, nil
}
func (m *cassandraMetadataPersistence) UpdateDomain(request *p.InternalUpdateDomainRequest) error {
var nextVersion int64 = 1
var currentVersion *int64
if request.NotificationVersion > 0 {
nextVersion = request.NotificationVersion + 1
currentVersion = &request.NotificationVersion
}
query := m.session.Query(templateUpdateDomainByNameQuery,
request.Info.ID,
request.Info.Name,
request.Info.Status,
request.Info.Description,
request.Info.OwnerEmail,
request.Info.Data,
request.Config.Retention,
request.Config.EmitMetric,
request.Config.ArchivalBucket,
request.Config.ArchivalStatus,
request.Config.HistoryArchivalStatus,
request.Config.HistoryArchivalURI,
request.Config.VisibilityArchivalStatus,
request.Config.VisibilityArchivalURI,
request.Config.BadBinaries.Data,
string(request.Config.BadBinaries.GetEncoding()),
request.ReplicationConfig.ActiveClusterName,
p.SerializeClusterConfigs(request.ReplicationConfig.Clusters),
request.ConfigVersion,
request.FailoverVersion,
nextVersion,
request.Info.Name,
currentVersion,
)
applied, err := query.ScanCAS()
if err != nil {
return &workflow.InternalServiceError{
Message: fmt.Sprintf("UpdateDomain operation failed. Error %v", err),
}
}
if !applied {
return &workflow.InternalServiceError{
Message: "UpdateDomain operation encountered a concurrent write.",
}
}
return nil
}
func (m *cassandraMetadataPersistence) DeleteDomain(request *p.DeleteDomainRequest) error {
var name string
query := m.session.Query(templateGetDomainQuery, request.ID)
err := query.Scan(&name)
if err != nil {
if err == gocql.ErrNotFound {
return nil
}
return err
}
return m.deleteDomain(name, request.ID)
}
func (m *cassandraMetadataPersistence) DeleteDomainByName(request *p.DeleteDomainByNameRequest) error {
var ID string
query := m.session.Query(templateGetDomainByNameQuery, request.Name)
err := query.Scan(&ID, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil)
if err != nil {
if err == gocql.ErrNotFound {
return nil
}
return err
}
return m.deleteDomain(request.Name, ID)
}
// (the source file continues with a ListDomains method)
bikeshareproject.py | Asks user to specify a city, month, and day to analyze.
Returns:
(str) city - name of the city to analyze
(str) month - name of the month to filter by, or "all" to apply no month filter
(str) day - name of the day of week to filter by, or "all" to apply no day filter
"""
print('Hello! Let\'s explore some US bikeshare data!')
# get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs
city = input('\nWhich city would you like to see data for? Type the name:\n-> Chicago \n-> New York\n-> Washington\n').lower()
# .lower() normalizes the input so any capitalization is accepted
while(True):
if(city == 'chicago' or city == 'new york' or city == 'washington' or city == 'all of them'):
break
else:
city = input('Sorry, I do not understand your input. Please input either '
'Chicago, New York, or Washington.\n(Enter Correct city):\t ').lower()
# .lower() normalizes the input so any capitalization is accepted
# TO DO: get user input for month (all, january, february, ... , june)
month = input('\nWhich month would you like data for? Type the name, or "all":\n-> January \n-> February \n-> March \n-> April \n-> May \n-> June \n').lower()
# .lower() normalizes the input so any capitalization is accepted
while(True):
if(month == 'january' or month == 'february' or month == 'march' or month == 'april' or month == 'may' or month == 'june' or month == 'all'):
break
else:
month = input('\nThat is not a valid month. Please enter one of the listed months, or "all":\n').lower()
# .lower() normalizes the input so any capitalization is accepted
# TO DO: get user input for day of week (all, monday, tuesday, ... sunday)
day = input('Which day would you like data for? Type the name:\n-> Monday \n-> Tuesday \n-> Wednesday \n-> Thursday \n-> Friday \n-> Saturday \n-> Sunday \n-> all to display data for all days\n').lower()
# .lower() normalizes the input so any capitalization is accepted
while(True):
if(day == 'monday' or day == 'tuesday' or day == 'wednesday' or day == 'thursday' or day == 'friday' or day == 'saturday' or day == 'sunday' or day == 'all'):
break
else:
day = input('\nThat is not a valid day. Please enter one of the listed days, or "all":\n').lower()
# .lower() normalizes the input so any capitalization is accepted
#return day
print('-'*40)
return city, month, day
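# Example: a return value of ('chicago', 'may', 'all') means "Chicago, May only,
# every weekday" for the loading and stats functions below (illustrative values).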
def load_data(city, month, day):
"""
Loads data for the specified city and filters by month and day if applicable.
Args:
(str) city - name of the city to analyze
(str) month - name of the month to filter by, or "all" to apply no month filter
(str) day - name of the day of week to filter by, or "all" to apply no day filter
Returns:
df - Pandas DataFrame containing city data filtered by month and day
"""
df = pd.read_csv(CITY_DATA[city])
df['Start Time'] = pd.to_datetime(df['Start Time'])
# pd.to_datetime parses the timestamp strings into datetime values
df['End Time'] = pd.to_datetime(df['End Time'])
if month != 'all':
months = ['january', 'february', 'march', 'april', 'may', 'june']
#used to find index of month.
month = months.index(month) + 1
df = df[df['Start Time'].dt.month == month]
#filter data by day.
if day != 'all':
df = df[df['Start Time'].dt.weekday_name == day.title()]  # newer pandas spells this .dt.day_name()
#print 5 rows.
print(df.head())
return df
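# Example (illustrative): load_data('chicago', 'march', 'friday') keeps only the
# Chicago rows whose Start Time falls on a Friday in March.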
def time_stats(df, month, day):
"""Displays statistics on the most frequent times of travel."""
print('\nCalculating the most frequent times of travel, please wait...\n')
start_time = time.time()
# display the most common month
if(month == 'all'):
most_similar_month = df['Start Time'].dt.month.value_counts().idxmax()
print('******************** Most common(popular) month is :'+ str(most_similar_month) + ' *********************')
# display the most common day of week
if(day == 'all'):
most_similar_day = df['Start Time'].dt.weekday_name.value_counts().idxmax()
print('******************** Most common(popular) day is : ' + str(most_similar_day) + ' *********************')
# display the most common start hour
most_similar_hour = df['Start Time'].dt.hour.value_counts().idxmax()
print('******************** Most popular hour is : ' + str(most_similar_hour) + ' *********************')
print("\n******************** This took %s seconds. *********************" % (time.time() - start_time))
print('-'*40)
def station_stats(df):
"""Displays statistics on the most popular stations and trip."""
print('\n******************** Calculating The Most Popular Stations and Trip... *********************\n')
start_time = time.time()
# display most commonly used start station
most_similar_start_station = st.mode(df['Start Station'])
print('\n******************** Most common start station is {} *********************\n'.format(most_similar_start_station))
# display most commonly used end station
most_similar_end_station = st.mode(df['End Station'])
print('\n******************** Most common end station is {} *********************\n'.format(most_similar_end_station))
# display most frequent combination of start station and end station trip
combination_trip = df['Start Station'].astype(str) + " to " + df['End Station'].astype(str)
The_most_frequent_trip = combination_trip.value_counts().idxmax()
print('\n******************** Most popular trip is from {} *********************\n'.format(The_most_frequent_trip))
print("\n******************** This took %s seconds. *********************" % (time.time() - start_time))
print('-'*40)
def trip_duration_stats(df):
"""Displays statistics on the total and average trip duration."""
print('\n******************** Calculating Trip Duration... *********************\n')
start_time = time.time()
# display total travel time
total_travel_time = df['Trip Duration'].sum()
time1 = total_travel_time
day = time1 // (24 * 3600)
time1 = time1 % (24 * 3600)
hour = time1 // 3600
time1 %= 3600
minutes = time1 // 60
time1 %= 60
seconds = time1
print('\n******************** Total travel time is {} days {} hours {} minutes {} seconds *********************'.format(day, hour, minutes, seconds))
# display mean travel time
mean_travel_time = df['Trip Duration'].mean()
time2 = mean_travel_time
day2 = time2 // (24 * 3600)
time2 = time2 % (24 * 3600)
hour2 = time2 // 3600
time2 %= 3600
minutes2 = time2 // 60
time2 %= 60
seconds2 = time2
print('\n******************** Mean travel time is {} hours {} minutes {} seconds *********************'.format(hour2, minutes2, seconds2))
print("\n******************** This took %s seconds. *********************" % (time.time() - start_time))
print('-'*40)
def user_stats(df):
"""Displays statistics on bikeshare users."""
print('\n******************** Calculating User Stats... *********************\n')
start_time = time.time()
# Display counts of user types
no_of_subscribing_user = df['User Type'].str.count('Subscriber').sum()
no_of_customers_using = df['User Type'].str.count('Customer').sum()
print('\n******************** Number of subscribers are {} *********************\n'.format(int(no_of_subscribing_user)))
print('\n******************** Number of customers(users) are {} *********************\n'.format(int(no_of_customers_using)))
# Display counts of gender
if('Gender' in df):
count_male = df['Gender'].str.count('Male').sum()
count_female = df['Gender'].str.count('Female').sum()
print('\n
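# Driver sketch showing how the pieces above chain together (the original
# script's main loop is not shown in this excerpt; the first function, whose
# def line is cut off above, is assumed to be named get_filters):
def _run_once():
    city, month, day = get_filters()
    df = load_data(city, month, day)
    time_stats(df, month, day)
    station_stats(df)
    trip_duration_stats(df)
    user_stats(df)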
bikeshareproject.py | Asks user to specify a city, month, and day to analyze.
Returns:
(str) city - name of the city to analyze
(str) month - name of the month to filter by, or "all" to apply no month filter
(str) day - name of the day of week to filter by, or "all" to apply no day filter
"""
print('Hello! Let\'s explore some US bikeshare data!')
# get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs
city = input('\nWould you like to see data for.?: Simply type the Name \n-> Chicago \n-> New York\n-> Washington\n').lower()
#lower() command is used to take input of any type formate
while(True):
if(city == 'chicago' or city == 'new york' or city == 'washington' or city == 'all of them'):
break
else:
city = input('Sorry, I do not understand your input. Please input either '
'Chicago, New York, or Washington.\n(Enter Correct city):\t ').lower()
#lower() command is used to take input of any type formate
# TO DO: get user input for month (all, january, february, ... , june)
month = input('\nWhich month Data you want..?: Simply type the Name \n-> January \n-> February \n-> March \n-> April \n-> May \n-> June \n').lower()
#.lower() command is used to take input of any type formate
while(True):
if(month == 'january' or month == 'february' or month == 'march' or month == 'april' or month == 'may' or month == 'june' or month == 'all'):
break
else:
month = input('\nPlease try to Enter valid month otherwise it will invalid and Not showing any result:\n').lower()
#lower() command is used to take input of any type formate
# TO DO: get user input for day of week (all, monday, tuesday, ... sunday)
day = input('Which day Data you want..? simply type the name \n-> Monday \n-> Tuesday \n-> Wednesday \n-> Thursday \n-> Friday \n-> Saturday \n-> Sunday \n-> all to display data of all days\n').lower()
#lower() command is used to take input of any type formate
while(True):
if(day == 'monday' or day == 'tuesday' or day == 'wednesday' or day == 'thursday' or day == 'friday' or day == 'saturday' or day == 'sunday' or day == 'all'):
break;
else:
day = input('\nPlease try to Enter valid Day otherwise it will invalid and Not showing any result:\nEnter Correct day:\t ').lower()
#lower() command is used to take input of any type formate
#return day
print('-'*40)
return city, month, day
def load_data(city, month, day):
"""
Loads data for the specified city and filters by month and day if applicable.
Args:
(str) city - name of the city to analyze
(str) month - name of the month to filter by, or "all" to apply no month filter
(str) day - name of the day of week to filter by, or "all" to apply no day filter
Returns:
df - Pandas DataFrame containing city data filtered by month and day
"""
df = pd.read_csv(CITY_DATA[city])
df['Start Time'] = pd.to_datetime(df['Start Time'])
# to_datetime command is used to convert(change) date into date format
df['End Time'] = pd.to_datetime(df['End Time'])
if month != 'all':
months = ['january', 'february', 'march', 'april', 'may', 'june']
#used to find index of month.
month = months.index(month) + 1
df = df[df['Start Time'].dt.month == month]
#filter data by day.
if day != 'all':
df = df[df['Start Time'].dt.weekday_name == day.title()]
#print 5 rows.
print(df.head())
return df
def time_stats(df, month, day):
"""Displays statistics on the most frequent times of travel."""
print('\nCalculating The Most regular Times of Travelling\n Loading Please wait for a while ........\n')
start_time = time.time()
# display the most common month
if(month == 'all'):
most_similar_month = df['Start Time'].dt.month.value_counts().idxmax()
print('******************** Most common(popular) month is :'+ str(most_similar_month) + ' *********************')
# display the most common day of week
if(day == 'all'):
most_similar_day = df['Start Time'].dt.weekday_name.value_counts().idxmax()
print('******************** Most common(popular) day is : ' + str(most_similar_day) + ' *********************')
# display the most common start hour
most_similar_hour = df['Start Time'].dt.hour.value_counts().idxmax()
print('******************** Most popular hour is : ' + str(most_similar_hour) + ' *********************')
print("\n******************** This took %s seconds. *********************" % (time.time() - start_time))
print('-'*40)
def station_stats(df):
"""Displays statistics on the most popular stations and trip."""
print('\n******************** Calculating The Most Popular Stations and Trip... *********************\n')
start_time = time.time()
# display most commonly used start station
most_similar_start_station = st.mode(df['Start Station'])
print('\n******************** Most common start station is {} *********************\n'.format(most_similar_start_station))
# display most commonly used end station
most_similar_end_station = st.mode(df['End Station'])
print('\n******************** Most common end station is {} *********************\n'.format(most_similar_end_station))
# display most frequent combination of start station and end station trip
combination_trip = df['Start Station'].astype(str) + " to " + df['End Station'].astype(str)
The_most_frequent_trip = combination_trip.value_counts().idxmax()
print('\n******************** Most popular trip is from {} *********************\n'.format(The_most_frequent_trip))
print("\n******************** This took %s seconds. *********************" % (time.time() - start_time)) |
def trip_duration_stats(df):
"""Displays statistics on the total and average trip duration."""
print('\n******************** Calculating Trip Duration... *********************\n')
start_time = time.time()
# display total travel time
total_travel_time = df['Trip Duration'].sum()
time1 = total_travel_time
day = time1 // (24 * 3600)
time1 = time1 % (24 * 3600)
hour = time1 // 3600
time1 %= 3600
minutes = time1 // 60
time1 %= 60
seconds = time1
print('\n******************** Total travel time is {} days {} hours {} minutes {} seconds *********************'.format(day, hour, minutes, seconds))
# display mean travel time
mean_travel_time = df['Trip Duration'].mean()
time2 = mean_travel_time
day2 = time2 // (24 * 3600)
time2 = time2 % (24 * 3600)
hour2 = time2 // 3600
time2 %= 3600
minutes2 = time2 // 60
time2 %= 60
seconds2 = time2
print('\n******************** Mean travel time is {} hours {} minutes {} seconds *********************'.format(hour2, minutes2, seconds2))
print("\n******************** This took %s seconds. *********************" % (time.time() - start_time))
print('-'*40)
def user_stats(df):
"""Displays statistics on bikeshare users."""
print('\n******************** Calculating User Stats... *********************\n')
start_time = time.time()
# Display counts of user types
no_of_subscribing_user = df['User Type'].str.count('Subscriber').sum()
no_of_customers_using = df['User Type'].str.count('Customer').sum()
print('\n******************** Number of subscribers are {} *********************\n'.format(int(no_of_subscribing_user)))
print('\n******************** Number of customers(users) are {} *********************\n'.format(int(no_of_customers_using)))
# Display counts of gender
if('Gender' in df):
count_male = df['Gender'].str.count('Male').sum()
count_female = df['Gender'].str.count('Female').sum()
print('\n | print('-'*40)
| random_line_split |
bikeshareproject.py | Asks user to specify a city, month, and day to analyze.
Returns:
(str) city - name of the city to analyze
(str) month - name of the month to filter by, or "all" to apply no month filter
(str) day - name of the day of week to filter by, or "all" to apply no day filter
"""
print('Hello! Let\'s explore some US bikeshare data!')
# get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs
city = input('\nWould you like to see data for.?: Simply type the Name \n-> Chicago \n-> New York\n-> Washington\n').lower()
#lower() command is used to take input of any type formate
while(True):
if(city == 'chicago' or city == 'new york' or city == 'washington' or city == 'all of them'):
|
else:
city = input('Sorry, I do not understand your input. Please input either '
'Chicago, New York, or Washington.\n(Enter Correct city):\t ').lower()
#lower() command is used to take input of any type formate
# TO DO: get user input for month (all, january, february, ... , june)
month = input('\nWhich month Data you want..?: Simply type the Name \n-> January \n-> February \n-> March \n-> April \n-> May \n-> June \n').lower()
#.lower() command is used to take input of any type formate
while(True):
if(month == 'january' or month == 'february' or month == 'march' or month == 'april' or month == 'may' or month == 'june' or month == 'all'):
break
else:
month = input('\nPlease try to Enter valid month otherwise it will invalid and Not showing any result:\n').lower()
#lower() command is used to take input of any type formate
# TO DO: get user input for day of week (all, monday, tuesday, ... sunday)
day = input('Which day Data you want..? simply type the name \n-> Monday \n-> Tuesday \n-> Wednesday \n-> Thursday \n-> Friday \n-> Saturday \n-> Sunday \n-> all to display data of all days\n').lower()
#lower() command is used to take input of any type formate
while(True):
if(day == 'monday' or day == 'tuesday' or day == 'wednesday' or day == 'thursday' or day == 'friday' or day == 'saturday' or day == 'sunday' or day == 'all'):
break;
else:
day = input('\nPlease try to Enter valid Day otherwise it will invalid and Not showing any result:\nEnter Correct day:\t ').lower()
#lower() command is used to take input of any type formate
#return day
print('-'*40)
return city, month, day
def load_data(city, month, day):
"""
Loads data for the specified city and filters by month and day if applicable.
Args:
(str) city - name of the city to analyze
(str) month - name of the month to filter by, or "all" to apply no month filter
(str) day - name of the day of week to filter by, or "all" to apply no day filter
Returns:
df - Pandas DataFrame containing city data filtered by month and day
"""
df = pd.read_csv(CITY_DATA[city])
df['Start Time'] = pd.to_datetime(df['Start Time'])
# to_datetime command is used to convert(change) date into date format
df['End Time'] = pd.to_datetime(df['End Time'])
if month != 'all':
months = ['january', 'february', 'march', 'april', 'may', 'june']
#used to find index of month.
month = months.index(month) + 1
df = df[df['Start Time'].dt.month == month]
#filter data by day.
if day != 'all':
df = df[df['Start Time'].dt.weekday_name == day.title()]
#print 5 rows.
print(df.head())
return df
def time_stats(df, month, day):
"""Displays statistics on the most frequent times of travel."""
print('\nCalculating The Most regular Times of Travelling\n Loading Please wait for a while ........\n')
start_time = time.time()
# display the most common month
if(month == 'all'):
most_similar_month = df['Start Time'].dt.month.value_counts().idxmax()
print('******************** Most common(popular) month is :'+ str(most_similar_month) + ' *********************')
# display the most common day of week
if(day == 'all'):
most_similar_day = df['Start Time'].dt.weekday_name.value_counts().idxmax()
print('******************** Most common(popular) day is : ' + str(most_similar_day) + ' *********************')
# display the most common start hour
most_similar_hour = df['Start Time'].dt.hour.value_counts().idxmax()
print('******************** Most popular hour is : ' + str(most_similar_hour) + ' *********************')
print("\n******************** This took %s seconds. *********************" % (time.time() - start_time))
print('-'*40)
def station_stats(df):
"""Displays statistics on the most popular stations and trip."""
print('\n******************** Calculating The Most Popular Stations and Trip... *********************\n')
start_time = time.time()
# display most commonly used start station
most_similar_start_station = st.mode(df['Start Station'])
print('\n******************** Most common start station is {} *********************\n'.format(most_similar_start_station))
# display most commonly used end station
most_similar_end_station = st.mode(df['End Station'])
print('\n******************** Most common end station is {} *********************\n'.format(most_similar_end_station))
# display most frequent combination of start station and end station trip
combination_trip = df['Start Station'].astype(str) + " to " + df['End Station'].astype(str)
The_most_frequent_trip = combination_trip.value_counts().idxmax()
print('\n******************** Most popular trip is from {} *********************\n'.format(The_most_frequent_trip))
print("\n******************** This took %s seconds. *********************" % (time.time() - start_time))
print('-'*40)
def trip_duration_stats(df):
"""Displays statistics on the total and average trip duration."""
print('\n******************** Calculating Trip Duration... *********************\n')
start_time = time.time()
# display total travel time
total_travel_time = df['Trip Duration'].sum()
time1 = total_travel_time
day = time1 // (24 * 3600)
time1 = time1 % (24 * 3600)
hour = time1 // 3600
time1 %= 3600
minutes = time1 // 60
time1 %= 60
seconds = time1
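# a more compact equivalent using divmod (hypothetical refactor):
#   day, rem = divmod(int(total_travel_time), 24 * 3600)
#   hour, rem = divmod(rem, 3600)
#   minutes, seconds = divmod(rem, 60)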
print('\n******************** Total travel time is {} days {} hours {} minutes {} seconds *********************'.format(day, hour, minutes, seconds))
# display mean travel time
mean_travel_time = df['Trip Duration'].mean()
time2 = mean_travel_time
day2 = time2 // (24 * 3600)
time2 = time2 % (24 * 3600)
hour2 = time2 // 3600
time2 %= 3600
minutes2 = time2 // 60
time2 %= 60
seconds2 = time2
print('\n******************** Mean travel time is {} days {} hours {} minutes {} seconds *********************'.format(day2, hour2, minutes2, seconds2))
print("\n******************** This took %s seconds. *********************" % (time.time() - start_time))
print('-'*40)
def user_stats(df):
"""Displays statistics on bikeshare users."""
print('\n******************** Calculating User Stats... *********************\n')
start_time = time.time()
# Display counts of user types
no_of_subscribing_user = df['User Type'].str.count('Subscriber').sum()
no_of_customers_using = df['User Type'].str.count('Customer').sum()
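# an equivalent, more idiomatic form would be df['User Type'].value_counts(),
# which tallies every user type in a single pass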
print('\n******************** Number of subscribers are {} *********************\n'.format(int(no_of_subscribing_user)))
print('\n******************** Number of customers(users) are {} *********************\n'.format(int(no_of_customers_using)))
# Display counts of gender
if('Gender' in df):
count_male = df['Gender'].str.count('Male').sum()
count_female = df['Gender'].str.count('Female').sum()
print('\n******************** | break | conditional_block |
bikeshareproject.py |
(str) day - name of the day of week to filter by, or "all" to apply no day filter
"""
print('Hello! Let\'s explore some US bikeshare data!')
# get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs
city = input('\nWhich city would you like to see data for? Simply type the name:\n-> Chicago \n-> New York\n-> Washington\n').lower()
#lower() is used so input of any capitalization is accepted
while(True):
if(city == 'chicago' or city == 'new york' or city == 'washington' or city == 'all of them'):
break
else:
city = input('Sorry, I do not understand your input. Please input either '
'Chicago, New York, or Washington.\n(Enter Correct city):\t ').lower()
#lower() is used so input of any capitalization is accepted
# TO DO: get user input for month (all, january, february, ... , june)
month = input('\nWhich month would you like data for? Simply type the name:\n-> January \n-> February \n-> March \n-> April \n-> May \n-> June \n-> all for every month\n').lower()
#.lower() is used so input of any capitalization is accepted
while(True):
if(month == 'january' or month == 'february' or month == 'march' or month == 'april' or month == 'may' or month == 'june' or month == 'all'):
break
else:
month = input('\nPlease enter a valid month; invalid input will not return any results:\n').lower()
#lower() is used so input of any capitalization is accepted
# TO DO: get user input for day of week (all, monday, tuesday, ... sunday)
day = input('Which day would you like data for? Simply type the name:\n-> Monday \n-> Tuesday \n-> Wednesday \n-> Thursday \n-> Friday \n-> Saturday \n-> Sunday \n-> all to display data for every day\n').lower()
#lower() is used so input of any capitalization is accepted
while(True):
if(day == 'monday' or day == 'tuesday' or day == 'wednesday' or day == 'thursday' or day == 'friday' or day == 'saturday' or day == 'sunday' or day == 'all'):
break
else:
day = input('\nPlease enter a valid day; invalid input will not return any results.\nEnter the correct day:\t ').lower()
#lower() is used so input of any capitalization is accepted
#return day
print('-'*40)
return city, month, day
def load_data(city, month, day):
"""
Loads data for the specified city and filters by month and day if applicable.
Args:
(str) city - name of the city to analyze
(str) month - name of the month to filter by, or "all" to apply no month filter
(str) day - name of the day of week to filter by, or "all" to apply no day filter
Returns:
df - Pandas DataFrame containing city data filtered by month and day
"""
df = pd.read_csv(CITY_DATA[city])
df['Start Time'] = pd.to_datetime(df['Start Time'])
# to_datetime converts the column from string to datetime format
df['End Time'] = pd.to_datetime(df['End Time'])
if month != 'all':
months = ['january', 'february', 'march', 'april', 'may', 'june']
#find the numeric index of the month (January = 1)
month = months.index(month) + 1
df = df[df['Start Time'].dt.month == month]
#filter data by day.
if day != 'all':
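# note: dt.weekday_name was removed in pandas 0.25; newer pandas uses dt.day_name()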
df = df[df['Start Time'].dt.weekday_name == day.title()]
#print 5 rows.
print(df.head())
return df
def time_stats(df, month, day):
"""Displays statistics on the most frequent times of travel."""
print('\nCalculating the most frequent times of travel...\nLoading, please wait...\n')
start_time = time.time()
# display the most common month
if(month == 'all'):
most_similar_month = df['Start Time'].dt.month.value_counts().idxmax()
print('******************** Most common(popular) month is :'+ str(most_similar_month) + ' *********************')
# display the most common day of week
if(day == 'all'):
most_similar_day = df['Start Time'].dt.weekday_name.value_counts().idxmax()
print('******************** Most common(popular) day is : ' + str(most_similar_day) + ' *********************')
# display the most common start hour
most_similar_hour = df['Start Time'].dt.hour.value_counts().idxmax()
print('******************** Most popular hour is : ' + str(most_similar_hour) + ' *********************')
print("\n******************** This took %s seconds. *********************" % (time.time() - start_time))
print('-'*40)
def station_stats(df):
"""Displays statistics on the most popular stations and trip."""
print('\n******************** Calculating The Most Popular Stations and Trip... *********************\n')
start_time = time.time()
# display most commonly used start station
most_similar_start_station = st.mode(df['Start Station'])
print('\n******************** Most common start station is {} *********************\n'.format(most_similar_start_station))
# display most commonly used end station
most_similar_end_station = st.mode(df['End Station'])
print('\n******************** Most common end station is {} *********************\n'.format(most_similar_end_station))
# display most frequent combination of start station and end station trip
combination_trip = df['Start Station'].astype(str) + " to " + df['End Station'].astype(str)
The_most_frequent_trip = combination_trip.value_counts().idxmax()
print('\n******************** Most popular trip is from {} *********************\n'.format(The_most_frequent_trip))
print("\n******************** This took %s seconds. *********************" % (time.time() - start_time))
print('-'*40)
def trip_duration_stats(df):
"""Displays statistics on the total and average trip duration."""
print('\n******************** Calculating Trip Duration... *********************\n')
start_time = time.time()
# display total travel time
total_travel_time = df['Trip Duration'].sum()
time1 = total_travel_time
day = time1 // (24 * 3600)
time1 = time1 % (24 * 3600)
hour = time1 // 3600
time1 %= 3600
minutes = time1 // 60
time1 %= 60
seconds = time1
print('\n******************** Total travel time is {} days {} hours {} minutes {} seconds *********************'.format(day, hour, minutes, seconds))
# display mean travel time
mean_travel_time = df['Trip Duration'].mean()
time2 = mean_travel_time
day2 = time2 // (24 * 3600)
time2 = time2 % (24 * 3600)
hour2 = time2 // 3600
time2 %= 3600
minutes2 = time2 // 60
time2 %= 60
seconds2 = time2
print('\n******************** Mean travel time is {} days {} hours {} minutes {} seconds *********************'.format(day2, hour2, minutes2, seconds2))
print("\n******************** This took %s seconds. *********************" % (time.time() - start_time))
print('-'*40)
def user_stats(df):
| """Displays statistics on bikeshare users."""
print('\n******************** Calculating User Stats... *********************\n')
start_time = time.time()
# Display counts of user types
no_of_subscribing_user = df['User Type'].str.count('Subscriber').sum()
no_of_customers_using = df['User Type'].str.count('Customer').sum()
print('\n******************** Number of subscribers are {} *********************\n'.format(int(no_of_subscribing_user)))
print('\n******************** Number of customers(users) are {} *********************\n'.format(int(no_of_customers_using)))
# Display counts of gender
if('Gender' in df):
count_male = df['Gender'].str.count('Male').sum()
count_female = df['Gender'].str.count('Female').sum()
print('\n******************** Number of male users are {} *********************\n'.format(int(count_male)))
print('\n******************** Number of female users are {} *********************\n'.format(int(count_female)))
# Display earliest, most recent, and most common year of birth | identifier_body |
|
bparser.py | (object):
"""
A parser for reading BibTeX bibliographic data files.
Example::
from bibtexparser.bparser import BibTexParser
bibtex_str = ...
parser = BibTexParser()
parser.ignore_nonstandard_types = False
parser.homogenise_fields = False
bib_database = bibtexparser.loads(bibtex_str, parser)
"""
def __new__(cls, data=None,
customization=None,
ignore_nonstandard_types=True,
homogenise_fields=True):
"""
To catch the old API structure in which creating the parser would immediately parse and return data.
"""
if data is None:
return super(BibTexParser, cls).__new__(cls)
else:
# For backwards compatibility: if data is given, parse and return the `BibDatabase` object instead of the
# parser.
parser = BibTexParser()
parser.customization = customization
parser.ignore_nonstandard_types = ignore_nonstandard_types
parser.homogenise_fields = homogenise_fields
return parser.parse(data)
def __init__(self):
"""
Creates a parser for reading BibTeX files
:return: parser
:rtype: `BibTexParser`
"""
self.bib_database = BibDatabase()
#: Callback function to process BibTeX entries after parsing, for example to create a list from a string with
#: multiple values. By default all BibTeX values are treated as simple strings. Default: `None`.
self.customization = None
#: Ignore non-standard BibTeX types (`book`, `article`, etc). Default: `True`.
self.ignore_nonstandard_types = True
#: Sanitise BibTeX field names, for example change `url` to `link` etc. Field names are always converted to
#: lowercase names. Default: `True`.
self.homogenise_fields = True
# On some sample data files, the character encoding detection simply
# hangs. We are going to default to utf8, and mandate it.
self.encoding = 'utf8'
# pre-defined set of key changes
self.alt_dict = {
'keyw': 'keyword',
'keywords': 'keyword',
'authors': 'author',
'editors': 'editor',
'url': 'link',
'urls': 'link',
'links': 'link',
'subjects': 'subject'
}
self.replace_all_re = re.compile(r'((?P<pre>"?)\s*(#|^)\s*(?P<id>[^\d\W]\w*)\s*(#|$)\s*(?P<post>"?))', re.UNICODE)
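# this pattern is intended to match bare @string identifiers, optionally quoted
# and joined with BibTeX's '#' concatenation operator, for later substitution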
def _bibtex_file_obj(self, bibtex_str):
# Some files have Byte-order marks inserted at the start
byte = '\xef\xbb\xbf'
if not isinstance(byte, ustr):
byte = ustr('\xef\xbb\xbf', self.encoding, 'ignore')
if bibtex_str[:3] == byte:
bibtex_str = bibtex_str[3:]
return StringIO(bibtex_str)
def parse(self, bibtex_str):
"""Parse a BibTeX string into an object
:param bibtex_str: BibTeX string
:type: str or unicode
:return: bibliographic database
:rtype: BibDatabase
"""
self.bibtex_file_obj = self._bibtex_file_obj(bibtex_str)
self._parse_records(customization=self.customization)
return self.bib_database
def parse_file(self, file):
"""Parse a BibTeX file into an object
:param file: BibTeX file or file-like object
:type: file
:return: bibliographic database
:rtype: BibDatabase
"""
return self.parse(file.read())
def _parse_records(self, customization=None):
"""Parse the bibtex into a list of records.
:param customization: a function
"""
def _add_parsed_record(record, records):
"""
Atomic function to parse a record
and append the result in records
"""
if record != "":
logger.debug('The record is not empty. Let\'s parse it.')
parsed = self._parse_record(record, customization=customization)
if parsed:
logger.debug('Store the result of the parsed record')
records.append(parsed)
else:
logger.debug('Nothing returned from the parsed record!')
else:
logger.debug('The record is empty')
records = []
record = ""
# read each line, bundle them up until they form an object, then send for parsing
for linenumber, line in enumerate(self.bibtex_file_obj):
logger.debug('Inspect line %s', linenumber)
if line.strip().startswith('@'):
# Remove leading whitespaces
line = line.lstrip()
logger.debug('Line starts with @')
# Parse previous record
_add_parsed_record(record, records)
# Start new record
logger.debug('The record is set to empty')
record = ""
# Keep adding lines to the record
record += line
# catch any remaining record and send it for parsing
_add_parsed_record(record, records)
logger.debug('Set the list of entries')
self.bib_database.entries = records
def _parse_record(self, record, customization=None):
"""Parse a record.
* tidy whitespace and other rubbish
* parse out the bibtype and citekey
* find all the key-value pairs it contains
:param record: a record
:param customization: a function
:returns: dict --
"""
d = {}
if not record.startswith('@'):
logger.debug('The record does not start with @. Return empty dict.')
return {}
# if a comment record, add to bib_database.comments
if record.lower().startswith('@comment'):
logger.debug('The record startswith @comment')
logger.debug('Store comment in list of comments')
self.bib_database.comments.append(re.search(r'\{(.*)\}', record, re.DOTALL).group(1))
logger.debug('Return an empty dict')
return {}
# if a preamble record, add to bib_database.preambles
if record.lower().startswith('@preamble'):
logger.debug('The record startswith @preamble')
logger.debug('Store preamble in list of preambles')
self.bib_database.preambles.append(re.search(r'\{(.*)\}', record, re.DOTALL).group(1))
logger.debug('Return an empty dict')
return {}
# prepare record
record = '\n'.join([i.strip() for i in record.split('\n')])
if '}\n' in record:
logger.debug('}\\n detected in the record. Clean up.')
record = record.replace('\r\n', '\n').replace('\r', '\n').rstrip('\n')
# treat the case for which the last line of the record
# does not have a comma
if record.endswith('}\n}') or record.endswith('}}'):
logger.debug('Missing comma in the last line of the record. Fix it.')
record = re.sub('}(\n|)}$', '},\n}', record)
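# e.g. "pages = {1}\n}" becomes "pages = {1},\n}" so the final field
# splits on ',\n' like every other field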
# if a string record, put it in the replace_dict
if record.lower().startswith('@string'):
logger.debug('The record startswith @string')
key, val = [i.strip().strip('{').strip('}').replace('\n', ' ') for i in record.split('{', 1)[1].strip('\n').strip(',').strip('}').split('=')]
key = key.lower() # key is case insensitive
val = self._string_subst_partial(val)
if val.startswith('"') or val.lower() not in self.bib_database.strings:
self.bib_database.strings[key] = val.strip('"')
else:
self.bib_database.strings[key] = self.bib_database.strings[val.lower()]
logger.debug('Return a dict')
return d
# for each line in record
logger.debug('Split the record into its lines and treat them')
kvs = [i.strip() for i in record.split(',\n')]
inkey = ""
inval = ""
for kv in kvs:
logger.debug('Inspect: %s', kv)
# TODO: We may check that the keyword belongs to a known type
if kv.startswith('@') and not inkey:
# it is the start of the record - set the bibtype and citekey (id)
logger.debug('Line starts with @ and the key is not stored yet.')
bibtype, id = kv.split('{', 1)
bibtype = self._add_key(bibtype)
id = id.strip('}').strip(',')
logger.debug('bibtype = %s', bibtype)
logger.debug('id = %s', id)
if self.ignore_nonstandard_types and bibtype not in ('article',
'book',
'booklet',
'conference',
'inbook',
'incollection',
'inproceedings',
'manual',
'mastersthesis',
'misc',
'phdthesis',
'proceedings',
'techreport',
'unpublished'):
logger.warning('Entry type %s not standard. Not considered.', bibtype)
break
elif '=' in kv and not inkey:
# it is a line with a | BibTexParser | identifier_name |
|
bparser.py | ex_str[3:]
return StringIO(bibtex_str)
def parse(self, bibtex_str):
"""Parse a BibTeX string into an object
:param bibtex_str: BibTeX string
:type: str or unicode
:return: bibliographic database
:rtype: BibDatabase
"""
self.bibtex_file_obj = self._bibtex_file_obj(bibtex_str)
self._parse_records(customization=self.customization)
return self.bib_database
def parse_file(self, file):
"""Parse a BibTeX file into an object
:param file: BibTeX file or file-like object
:type: file
:return: bibliographic database
:rtype: BibDatabase
"""
return self.parse(file.read())
def _parse_records(self, customization=None):
"""Parse the bibtex into a list of records.
:param customization: a function
"""
def _add_parsed_record(record, records):
"""
Atomic function to parse a record
and append the result in records
"""
if record != "":
logger.debug('The record is not empty. Let\'s parse it.')
parsed = self._parse_record(record, customization=customization)
if parsed:
logger.debug('Store the result of the parsed record')
records.append(parsed)
else:
logger.debug('Nothing returned from the parsed record!')
else:
logger.debug('The record is empty')
records = []
record = ""
# read each line, bundle them up until they form an object, then send for parsing
for linenumber, line in enumerate(self.bibtex_file_obj):
logger.debug('Inspect line %s', linenumber)
if line.strip().startswith('@'):
# Remove leading whitespaces
line = line.lstrip()
logger.debug('Line starts with @')
# Parse previous record
_add_parsed_record(record, records)
# Start new record
logger.debug('The record is set to empty')
record = ""
# Keep adding lines to the record
record += line
# catch any remaining record and send it for parsing
_add_parsed_record(record, records)
logger.debug('Set the list of entries')
self.bib_database.entries = records
def _parse_record(self, record, customization=None):
"""Parse a record.
* tidy whitespace and other rubbish
* parse out the bibtype and citekey
* find all the key-value pairs it contains
:param record: a record
:param customization: a function
:returns: dict --
"""
d = {}
if not record.startswith('@'):
logger.debug('The record does not start with @. Return empty dict.')
return {}
# if a comment record, add to bib_database.comments
if record.lower().startswith('@comment'):
logger.debug('The record startswith @comment')
logger.debug('Store comment in list of comments')
self.bib_database.comments.append(re.search(r'\{(.*)\}', record, re.DOTALL).group(1))
logger.debug('Return an empty dict')
return {}
# if a preamble record, add to bib_database.preambles
if record.lower().startswith('@preamble'):
logger.debug('The record startswith @preamble')
logger.debug('Store preamble in list of preambles')
self.bib_database.preambles.append(re.search(r'\{(.*)\}', record, re.DOTALL).group(1))
logger.debug('Return an empty dict')
return {}
# prepare record
record = '\n'.join([i.strip() for i in record.split('\n')])
if '}\n' in record:
logger.debug('}\\n detected in the record. Clean up.')
record = record.replace('\r\n', '\n').replace('\r', '\n').rstrip('\n')
# treat the case for which the last line of the record
# does not have a comma
if record.endswith('}\n}') or record.endswith('}}'):
logger.debug('Missing comma in the last line of the record. Fix it.')
record = re.sub('}(\n|)}$', '},\n}', record)
# if a string record, put it in the replace_dict
if record.lower().startswith('@string'):
logger.debug('The record startswith @string')
key, val = [i.strip().strip('{').strip('}').replace('\n', ' ') for i in record.split('{', 1)[1].strip('\n').strip(',').strip('}').split('=')]
key = key.lower() # key is case insensitive
val = self._string_subst_partial(val)
if val.startswith('"') or val.lower() not in self.bib_database.strings:
self.bib_database.strings[key] = val.strip('"')
else:
self.bib_database.strings[key] = self.bib_database.strings[val.lower()]
logger.debug('Return a dict')
return d
# for each line in record
logger.debug('Split the record into its lines and treat them')
kvs = [i.strip() for i in record.split(',\n')]
inkey = ""
inval = ""
for kv in kvs:
logger.debug('Inspect: %s', kv)
# TODO: We may check that the keyword belongs to a known type
if kv.startswith('@') and not inkey:
# it is the start of the record - set the bibtype and citekey (id)
logger.debug('Line starts with @ and the key is not stored yet.')
bibtype, id = kv.split('{', 1)
bibtype = self._add_key(bibtype)
id = id.strip('}').strip(',')
logger.debug('bibtype = %s', bibtype)
logger.debug('id = %s', id)
if self.ignore_nonstandard_types and bibtype not in ('article',
'book',
'booklet',
'conference',
'inbook',
'incollection',
'inproceedings',
'manual',
'mastersthesis',
'misc',
'phdthesis',
'proceedings',
'techreport',
'unpublished'):
logger.warning('Entry type %s not standard. Not considered.', bibtype)
break
elif '=' in kv and not inkey:
# it is a line with a key value pair on it
logger.debug('Line contains a key-pair value and the key is not stored yet.')
key, val = [i.strip() for i in kv.split('=', 1)]
key = self._add_key(key)
val = self._string_subst_partial(val)
# if it looks like the value spans lines, store details for next loop
if (val.count('{') != val.count('}')) or (val.startswith('"') and not val.replace('}', '').endswith('"')):
logger.debug('The line is not ending the record.')
inkey = key
inval = val
else:
logger.debug('The line is the end of the record.')
d[key] = self._add_val(val)
elif inkey:
logger.debug('Continues the previous line to complete the key pair value...')
# if this line continues the value from a previous line, append
inval += ', ' + kv
# if it looks like this line finishes the value, store it and clear for next loop
if (inval.startswith('{') and inval.endswith('}')) or (inval.startswith('"') and inval.endswith('"')):
logger.debug('This line represents the end of the current key-pair value')
d[inkey] = self._add_val(inval)
inkey = ""
inval = ""
else:
logger.debug('This line does NOT represent the end of the current key-pair value')
logger.debug('All lines have been treated')
if not d:
logger.debug('The dict is empty, return it.')
return d
d['ENTRYTYPE'] = bibtype
d['ID'] = id
if customization is None:
logger.debug('No customization to apply, return dict')
return d
else:
# apply any customizations to the record object then return it
logger.debug('Apply customizations and return dict')
return customization(d)
def _strip_quotes(self, val):
"""Strip double quotes enclosing string
:param val: a value
:type val: string
:returns: string -- value
"""
logger.debug('Strip quotes')
val = val.strip()
if val.startswith('"') and val.endswith('"'):
return val[1:-1]
return val
def _strip_braces(self, val):
"""Strip braces enclosing string
:param val: a value
:type val: string
:returns: string -- value
"""
logger.debug('Strip braces')
val = val.strip()
if val.startswith('{') and val.endswith('}') and self._full_span(val):
return val[1:-1]
return val
def _full_span(self, val):
| cnt = 0
for i in range(0, len(val)):
if val[i] == '{':
cnt += 1
elif val[i] == '}':
cnt -= 1
if cnt == 0:
break
if i == len(val) - 1:
return True
else:
return False | identifier_body |
|
bparser.py | ignore')
if bibtex_str[:3] == byte:
bibtex_str = bibtex_str[3:]
return StringIO(bibtex_str)
def parse(self, bibtex_str):
"""Parse a BibTeX string into an object
:param bibtex_str: BibTeX string
:type: str or unicode
:return: bibliographic database
:rtype: BibDatabase
"""
self.bibtex_file_obj = self._bibtex_file_obj(bibtex_str)
self._parse_records(customization=self.customization)
return self.bib_database
def parse_file(self, file):
"""Parse a BibTeX file into an object
:param file: BibTeX file or file-like object
:type: file
:return: bibliographic database
:rtype: BibDatabase
"""
return self.parse(file.read())
def _parse_records(self, customization=None):
"""Parse the bibtex into a list of records.
:param customization: a function
"""
def _add_parsed_record(record, records):
"""
Atomic function to parse a record
and append the result in records
"""
if record != "":
logger.debug('The record is not empty. Let\'s parse it.')
parsed = self._parse_record(record, customization=customization)
if parsed:
logger.debug('Store the result of the parsed record')
records.append(parsed)
else:
logger.debug('Nothing returned from the parsed record!')
else:
logger.debug('The record is empty')
records = []
record = ""
# read each line, bundle them up until they form an object, then send for parsing
for linenumber, line in enumerate(self.bibtex_file_obj):
logger.debug('Inspect line %s', linenumber)
if line.strip().startswith('@'):
# Remove leading whitespaces
line = line.lstrip()
logger.debug('Line starts with @')
# Parse previous record
_add_parsed_record(record, records)
# Start new record
logger.debug('The record is set to empty')
record = ""
# Keep adding lines to the record
record += line
# catch any remaining record and send it for parsing
_add_parsed_record(record, records)
logger.debug('Set the list of entries')
self.bib_database.entries = records
def _parse_record(self, record, customization=None):
"""Parse a record.
* tidy whitespace and other rubbish
* parse out the bibtype and citekey
* find all the key-value pairs it contains
:param record: a record
:param customization: a function
:returns: dict --
"""
d = {}
if not record.startswith('@'):
logger.debug('The record does not start with @. Return empty dict.')
return {}
# if a comment record, add to bib_database.comments
if record.lower().startswith('@comment'):
logger.debug('The record startswith @comment')
logger.debug('Store comment in list of comments')
self.bib_database.comments.append(re.search(r'\{(.*)\}', record, re.DOTALL).group(1))
logger.debug('Return an empty dict')
return {}
# if a preamble record, add to bib_database.preambles
if record.lower().startswith('@preamble'):
logger.debug('The record startswith @preamble')
logger.debug('Store preamble in list of preambles')
self.bib_database.preambles.append(re.search(r'\{(.*)\}', record, re.DOTALL).group(1))
logger.debug('Return an empty dict')
return {}
# prepare record
record = '\n'.join([i.strip() for i in record.split('\n')])
if '}\n' in record:
logger.debug('}\\n detected in the record. Clean up.')
record = record.replace('\r\n', '\n').replace('\r', '\n').rstrip('\n')
# treat the case for which the last line of the record
# does not have a comma
if record.endswith('}\n}') or record.endswith('}}'):
logger.debug('Missing comma in the last line of the record. Fix it.')
record = re.sub('}(\n|)}$', '},\n}', record)
# if a string record, put it in the replace_dict
if record.lower().startswith('@string'):
logger.debug('The record startswith @string')
key, val = [i.strip().strip('{').strip('}').replace('\n', ' ') for i in record.split('{', 1)[1].strip('\n').strip(',').strip('}').split('=')]
key = key.lower() # key is case insensitive
val = self._string_subst_partial(val)
if val.startswith('"') or val.lower() not in self.bib_database.strings:
self.bib_database.strings[key] = val.strip('"')
else:
self.bib_database.strings[key] = self.bib_database.strings[val.lower()]
logger.debug('Return a dict')
return d
# for each line in record
logger.debug('Split the record into its lines and treat them')
kvs = [i.strip() for i in record.split(',\n')]
inkey = ""
inval = ""
for kv in kvs:
logger.debug('Inspect: %s', kv)
# TODO: We may check that the keyword belongs to a known type
if kv.startswith('@') and not inkey:
# it is the start of the record - set the bibtype and citekey (id)
logger.debug('Line starts with @ and the key is not stored yet.')
bibtype, id = kv.split('{', 1)
bibtype = self._add_key(bibtype)
id = id.strip('}').strip(',')
logger.debug('bibtype = %s', bibtype)
logger.debug('id = %s', id)
if self.ignore_nonstandard_types and bibtype not in ('article',
'book',
'booklet',
'conference',
'inbook',
'incollection',
'inproceedings',
'manual',
'mastersthesis',
'misc',
'phdthesis',
'proceedings',
'techreport',
'unpublished'):
logger.warning('Entry type %s not standard. Not considered.', bibtype)
break
elif '=' in kv and not inkey:
# it is a line with a key value pair on it
logger.debug('Line contains a key-pair value and the key is not stored yet.')
key, val = [i.strip() for i in kv.split('=', 1)]
key = self._add_key(key)
val = self._string_subst_partial(val)
# if it looks like the value spans lines, store details for next loop
if (val.count('{') != val.count('}')) or (val.startswith('"') and not val.replace('}', '').endswith('"')):
logger.debug('The line is not ending the record.')
inkey = key
inval = val
else:
logger.debug('The line is the end of the record.')
d[key] = self._add_val(val)
elif inkey:
logger.debug('Continues the previous line to complete the key pair value...')
# if this line continues the value from a previous line, append
inval += ', ' + kv
# if it looks like this line finishes the value, store it and clear for next loop
if (inval.startswith('{') and inval.endswith('}')) or (inval.startswith('"') and inval.endswith('"')):
logger.debug('This line represents the end of the current key-pair value')
d[inkey] = self._add_val(inval)
inkey = ""
inval = ""
else:
logger.debug('This line does NOT represent the end of the current key-pair value')
logger.debug('All lines have been treated')
if not d:
logger.debug('The dict is empty, return it.')
return d
d['ENTRYTYPE'] = bibtype
d['ID'] = id
if customization is None:
logger.debug('No customization to apply, return dict')
return d
else:
# apply any customizations to the record object then return it
logger.debug('Apply customizations and return dict')
return customization(d)
def _strip_quotes(self, val):
"""Strip double quotes enclosing string
:param val: a value
:type val: string
:returns: string -- value
"""
logger.debug('Strip quotes')
val = val.strip()
if val.startswith('"') and val.endswith('"'):
return val[1:-1]
return val
def _strip_braces(self, val):
"""Strip braces enclosing string
:param val: a value
:type val: string
:returns: string -- value
"""
logger.debug('Strip braces')
val = val.strip()
if val.startswith('{') and val.endswith('}') and self._full_span(val):
return val[1:-1]
return val
def _full_span(self, val):
cnt = 0
for i in range(0, len(val)):
| if val[i] == '{':
cnt += 1
elif val[i] == '}':
cnt -= 1
if cnt == 0:
break | conditional_block |
|
bparser.py | Parser`
"""
self.bib_database = BibDatabase()
#: Callback function to process BibTeX entries after parsing, for example to create a list from a string with
#: multiple values. By default all BibTeX values are treated as simple strings. Default: `None`.
self.customization = None
#: Ignore non-standard BibTeX types (`book`, `article`, etc). Default: `True`.
self.ignore_nonstandard_types = True
#: Sanitise BibTeX field names, for example change `url` to `link` etc. Field names are always converted to
#: lowercase names. Default: `True`.
self.homogenise_fields = True
# On some sample data files, the character encoding detection simply
# hangs. We are going to default to utf8, and mandate it.
self.encoding = 'utf8'
# pre-defined set of key changes
self.alt_dict = {
'keyw': 'keyword',
'keywords': 'keyword',
'authors': 'author',
'editors': 'editor',
'url': 'link',
'urls': 'link',
'links': 'link',
'subjects': 'subject'
}
self.replace_all_re = re.compile(r'((?P<pre>"?)\s*(#|^)\s*(?P<id>[^\d\W]\w*)\s*(#|$)\s*(?P<post>"?))', re.UNICODE)
def _bibtex_file_obj(self, bibtex_str):
# Some files have Byte-order marks inserted at the start
byte = '\xef\xbb\xbf'
if not isinstance(byte, ustr):
byte = ustr('\xef\xbb\xbf', self.encoding, 'ignore')
if bibtex_str[:3] == byte:
bibtex_str = bibtex_str[3:]
return StringIO(bibtex_str)
def parse(self, bibtex_str):
"""Parse a BibTeX string into an object
:param bibtex_str: BibTeX string
:type: str or unicode
:return: bibliographic database
:rtype: BibDatabase
"""
self.bibtex_file_obj = self._bibtex_file_obj(bibtex_str)
self._parse_records(customization=self.customization)
return self.bib_database
def parse_file(self, file):
"""Parse a BibTeX file into an object
:param file: BibTeX file or file-like object
:type: file
:return: bibliographic database
:rtype: BibDatabase
"""
return self.parse(file.read())
def _parse_records(self, customization=None):
"""Parse the bibtex into a list of records.
:param customization: a function
"""
def _add_parsed_record(record, records):
"""
Atomic function to parse a record
and append the result in records
"""
if record != "":
logger.debug('The record is not empty. Let\'s parse it.')
parsed = self._parse_record(record, customization=customization)
if parsed:
logger.debug('Store the result of the parsed record')
records.append(parsed)
else:
logger.debug('Nothing returned from the parsed record!')
else:
logger.debug('The record is empty')
records = []
record = ""
# read each line, bundle them up until they form an object, then send for parsing
for linenumber, line in enumerate(self.bibtex_file_obj):
logger.debug('Inspect line %s', linenumber)
if line.strip().startswith('@'):
# Remove leading whitespaces
line = line.lstrip()
logger.debug('Line starts with @')
# Parse previous record
_add_parsed_record(record, records)
# Start new record
logger.debug('The record is set to empty')
record = ""
# Keep adding lines to the record
record += line
# catch any remaining record and send it for parsing
_add_parsed_record(record, records)
logger.debug('Set the list of entries')
self.bib_database.entries = records
def _parse_record(self, record, customization=None):
"""Parse a record.
* tidy whitespace and other rubbish
* parse out the bibtype and citekey
* find all the key-value pairs it contains
:param record: a record
:param customization: a function
:returns: dict --
"""
d = {}
if not record.startswith('@'):
logger.debug('The record does not start with @. Return empty dict.')
return {}
# if a comment record, add to bib_database.comments
if record.lower().startswith('@comment'):
logger.debug('The record startswith @comment')
logger.debug('Store comment in list of comments')
self.bib_database.comments.append(re.search(r'\{(.*)\}', record, re.DOTALL).group(1))
logger.debug('Return an empty dict')
return {}
# if a preamble record, add to bib_database.preambles
if record.lower().startswith('@preamble'):
logger.debug('The record startswith @preamble')
logger.debug('Store preamble in list of preambles')
self.bib_database.preambles.append(re.search(r'\{(.*)\}', record, re.DOTALL).group(1))
logger.debug('Return an empty dict')
return {}
# prepare record
record = '\n'.join([i.strip() for i in record.split('\n')])
if '}\n' in record:
logger.debug('}\\n detected in the record. Clean up.')
record = record.replace('\r\n', '\n').replace('\r', '\n').rstrip('\n')
# treat the case for which the last line of the record
# does not have a comma
if record.endswith('}\n}') or record.endswith('}}'):
logger.debug('Missing comma in the last line of the record. Fix it.')
record = re.sub('}(\n|)}$', '},\n}', record)
# if a string record, put it in the replace_dict
if record.lower().startswith('@string'):
logger.debug('The record startswith @string')
key, val = [i.strip().strip('{').strip('}').replace('\n', ' ') for i in record.split('{', 1)[1].strip('\n').strip(',').strip('}').split('=')]
key = key.lower() # key is case insensitive
val = self._string_subst_partial(val)
if val.startswith('"') or val.lower() not in self.bib_database.strings:
self.bib_database.strings[key] = val.strip('"')
else:
self.bib_database.strings[key] = self.bib_database.strings[val.lower()]
logger.debug('Return a dict')
return d
# for each line in record
logger.debug('Split the record into its lines and treat them')
kvs = [i.strip() for i in record.split(',\n')]
inkey = ""
inval = ""
for kv in kvs:
logger.debug('Inspect: %s', kv)
# TODO: We may check that the keyword belongs to a known type
if kv.startswith('@') and not inkey:
# it is the start of the record - set the bibtype and citekey (id)
logger.debug('Line starts with @ and the key is not stored yet.')
bibtype, id = kv.split('{', 1)
bibtype = self._add_key(bibtype)
id = id.strip('}').strip(',')
logger.debug('bibtype = %s', bibtype)
logger.debug('id = %s', id)
if self.ignore_nonstandard_types and bibtype not in ('article',
'book',
'booklet',
'conference',
'inbook',
'incollection',
'inproceedings',
'manual',
'mastersthesis',
'misc',
'phdthesis',
'proceedings',
'techreport',
'unpublished'):
logger.warning('Entry type %s not standard. Not considered.', bibtype)
break
elif '=' in kv and not inkey:
# it is a line with a key value pair on it
logger.debug('Line contains a key-pair value and the key is not stored yet.')
key, val = [i.strip() for i in kv.split('=', 1)]
key = self._add_key(key)
val = self._string_subst_partial(val)
# if it looks like the value spans lines, store details for next loop
if (val.count('{') != val.count('}')) or (val.startswith('"') and not val.replace('}', '').endswith('"')):
logger.debug('The line is not ending the record.')
inkey = key
inval = val | elif inkey:
logger.debug('Continues the previous line to complete the key pair value...')
# if this line continues the value from a previous line, append
inval += ', ' + kv
# if it looks like this line finishes the value, store it and clear for next loop
if (inval.startswith('{') and inval.endswith('}')) or (inval.startswith('"') and inval.endswith('"')):
logger.debug('This line represents the end of the current key-pair value')
| else:
logger.debug('The line is the end of the record.')
d[key] = self._add_val(val) | random_line_split |
fill_data.py | (file_data, ntts=[]):
'''
Initializes the file's data dictionary
'''
content_fixes(file_data)
file_data["id"]=mid2url(file_data['_id'])
file_data['name']=file_data['src'].itervalues().next()['url']
file_se = file_data["se"] if "se" in file_data else None
ntt = ntts[int(float(file_se["_id"]))] if file_se and "_id" in file_se and file_se["_id"] in ntts else None
if ntt:
file_se["info"] = ntt
file_se["rel"] = [ntts[relid] for relids in ntt["r"].itervalues() for relid in relids if relid in ntts] if "r" in ntt else []
return {"file":file_data,"view":{}}
def choose_filename(f,text_cache=None):
'''
Chooses the correct filename
'''
srcs = f['file']['src']
fns = f['file']['fn']
chosen = None
max_count = -1
current_weight = -1
if text_cache and text_cache[0] in fns: # if text is actually an fn ID
chosen = text_cache[0]
else:
for hexuri,src in srcs.items():
if 'bl' in src and src['bl']!=0:
continue
for crc,srcfn in src['fn'].items():
if crc not in fns: #for sources that have a name but are not present in the file
continue
#if it has no name it is not taken into account
m = srcfn['m'] if len(fns[crc]['n'])>0 else 0
if 'c' in fns[crc]:
fns[crc]['c']+=m
else:
fns[crc]['c']=m
text_weight = 0
if text_cache:
fn_parts = slugify(fns[crc]['n']).strip().split(" ")
if len(fn_parts)>0:
text_words = slugify(text_cache[0]).split(" ")
# scores the number and order of matches
last_pos = -1
max_length = length = 0
occurrences = [0]*len(text_words)
for part in fn_parts:
pos = text_words.index(part) if part in text_words else -1
if pos != -1 and (last_pos==-1 or pos==last_pos+1):
length += 1
else:
if length > max_length: max_length = length
length = 0
if pos != -1:
occurrences[pos]=1
last_pos = pos
if length > max_length: max_length = length
text_weight = sum(occurrences)*100 + max_length
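# combined score: matched words dominate (x100), ties broken by the
# longest run of words matched in order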
f['file']['fn'][crc]['tht'] = text_weight
better = fns[crc]['c']>max_count
if text_weight > current_weight or (better and text_weight==current_weight):
current_weight = text_weight
chosen = crc
max_count = fns[crc]['c']
f['view']['url'] = mid2url(hex2mid(f['file']['_id']))
f['view']['fnid'] = chosen
if chosen:
filename = fns[chosen]['n']
ext = fns[chosen]['x']
else: #uses filename from src
filename = ""
for hexuri,src in srcs.items():
if src['url'].find("/")!=-1:
filename = src['url']
if filename=="":
return
filename = filename[filename.rfind("/")+1:]
ext = filename[filename.rfind(".")+1:]
filename = filename[0:filename.rfind(".")]
#TODO: if no filename is provided, build one from the metadata (e.g. series - episode title)
filename = extension_filename(filename,ext)
f['view']['fn'] = filename.replace("?", "")
f['view']['qfn'] = qfn = u(filename).encode("UTF-8") #filename escaped to build the download URLs
f['view']['pfn'] = urllib.quote(qfn).replace(" ", "%20") # P2P filename
nfilename = seoize_text(filename, " ",True, 0)
f['view']['nfn'] = nfilename
# add the filename words as keywords
g.keywords.update(set(keyword for keyword in nfilename.split(" ") if len(keyword)>1))
#filename with the words that match the search highlighted
if text_cache:
f['view']['fnh'], f['view']['fnhs'] = highlight(text_cache[2],filename,True)
else:
f['view']['fnh'] = filename #this is only for download, which never has text
return current_weight>0 # indica si ha encontrado el texto buscado
def build_source_links(f):
'''
Builds the source links correctly
'''
def get_domain(src):
'''
Returns the domain of a URL
'''
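# heuristic: a short TLD plus a short second-level label (e.g. 'example.co.uk')
# keeps three labels; anything else keeps two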
url_parts=urlparse(src).netloc.split('.')
i=len(url_parts)-1
if len(url_parts[i])<=2 and len(url_parts[i-1])<=3:
return url_parts[i-2]+'.'+url_parts[i-1]+'.'+url_parts[i]
else:
return url_parts[i-1]+'.'+url_parts[i]
f['view']['action']='download'
f['view']['sources']={}
max_weight=0
icon=""
any_downloader=False
# grouping of sources
source_groups = {}
file_sources = f['file']['src'].items()
file_sources.sort(key=lambda x:x[1]["t"])
for hexuri,src in file_sources:
if not src.get('bl',None) in (0, None):
continue
url_pattern=downloader=join=False
count=0
part=url=""
source_data=g.sources[src["t"]] if "t" in src and src["t"] in g.sources else None
if source_data is None: #if the file's source does not exist
logging.error("El fichero contiene un origen inexistente en la tabla \"sources\": %s" % src["t"], extra={"file":f})
if feedbackdb.initialized:
feedbackdb.notify_source_error(f['file']["_id"], f['file']["s"])
continue
elif "crbl" in source_data and int(source_data["crbl"])==1: #si el origen esta bloqueado
continue
elif "w" in source_data["g"] or "f" in source_data["g"] or "s" in source_data["g"]: #si es descarga directa o streaming
link_weight=1
tip=source_data["d"]
icon="web"
source_groups[icon] = tip
source=get_domain(src['url']) if "f" in source_data["g"] else source_data["d"]
url=src['url']
if "url_pattern" in source_data and not url.startswith(("https://","http://","ftp://")):
url_pattern=True
#when in doubt, streaming is preferred
if "s" in source_data["g"]:
f['view']['action']="listen" if f['view']['ct']==CONTENT_AUDIO else 'watch'
link_weight*=2
#torrent hash before torrent because it is a more specific case
elif source_data["d"]=="BitTorrentHash":
downloader=True
link_weight=0.9 if 'torrent:tracker' in f['file']['md'] or 'torrent:trackers' in f['file']['md'] else 0.1
tip="Torrent MagnetLink"
source="tmagnet"
icon="torrent"
if not icon in source_groups:
source_groups[icon] = tip # magnet link tiene menos prioridad para el texto
join=True
count=int(src['m'])
part="xt=urn:btih:"+src['url']
if 'torrent:tracker' in f['file']['md']:
part += unicode('&tr=' + urllib.quote_plus(u(f['file']['md']['torrent:tracker']).encode("UTF-8")), "UTF-8")
elif 'torrent:trackers' in f['file']['md']:
trackers = f['file']['md']['torrent:trackers']
if isinstance(trackers, basestring):
part += unicode("".join('&tr='+urllib.quote_plus(tr) for tr in u(trackers).encode("UTF-8").split(" ")), "UTF-8")
elif "t" in source_data["g"]:
downloader=True
link_weight=0.8
url=src['url']
if "url_pattern" in source_data and not url.startswith(("https://","http://","ftp://")):
url_pattern=True
tip=source=get_domain(source_data["url_pattern"]%url)
else:
tip=source=get_domain(src['url'])
icon="torrent"
if not icon in source_groups:
source_groups[icon] = tip
elif source_data["d"]=="Gnut | init_data | identifier_name |
|
fill_data.py | # agrupación de origenes
source_groups = {}
file_sources = f['file']['src'].items()
file_sources.sort(key=lambda x:x[1]["t"])
for hexuri,src in file_sources:
if not src.get('bl',None) in (0, None):
continue
url_pattern=downloader=join=False
count=0
part=url=""
source_data=g.sources[src["t"]] if "t" in src and src["t"] in g.sources else None
if source_data is None: #if the file's source does not exist
logging.error("El fichero contiene un origen inexistente en la tabla \"sources\": %s" % src["t"], extra={"file":f})
if feedbackdb.initialized:
feedbackdb.notify_source_error(f['file']["_id"], f['file']["s"])
continue
elif "crbl" in source_data and int(source_data["crbl"])==1: #si el origen esta bloqueado
continue
elif "w" in source_data["g"] or "f" in source_data["g"] or "s" in source_data["g"]: #si es descarga directa o streaming
link_weight=1
tip=source_data["d"]
icon="web"
source_groups[icon] = tip
source=get_domain(src['url']) if "f" in source_data["g"] else source_data["d"]
url=src['url']
if "url_pattern" in source_data and not url.startswith(("https://","http://","ftp://")):
url_pattern=True
#when in doubt, streaming is preferred
if "s" in source_data["g"]:
f['view']['action']="listen" if f['view']['ct']==CONTENT_AUDIO else 'watch'
link_weight*=2
#torrent hash before torrent because it is a more specific case
elif source_data["d"]=="BitTorrentHash":
downloader=True
link_weight=0.9 if 'torrent:tracker' in f['file']['md'] or 'torrent:trackers' in f['file']['md'] else 0.1
tip="Torrent MagnetLink"
source="tmagnet"
icon="torrent"
if not icon in source_groups:
source_groups[icon] = tip # magnet link tiene menos prioridad para el texto
join=True
count=int(src['m'])
part="xt=urn:btih:"+src['url']
if 'torrent:tracker' in f['file']['md']:
part += unicode('&tr=' + urllib.quote_plus(u(f['file']['md']['torrent:tracker']).encode("UTF-8")), "UTF-8")
elif 'torrent:trackers' in f['file']['md']:
trackers = f['file']['md']['torrent:trackers']
if isinstance(trackers, basestring):
part += unicode("".join('&tr='+urllib.quote_plus(tr) for tr in u(trackers).encode("UTF-8").split(" ")), "UTF-8")
elif "t" in source_data["g"]:
downloader=True
link_weight=0.8
url=src['url']
if "url_pattern" in source_data and not url.startswith(("https://","http://","ftp://")):
url_pattern=True
tip=source=get_domain(source_data["url_pattern"]%url)
else:
tip=source=get_domain(src['url'])
icon="torrent"
if not icon in source_groups:
source_groups[icon] = tip
elif source_data["d"]=="Gnutella":
link_weight=0.2
tip="Gnutella"
source=icon="gnutella"
part="xt=urn:sha1:"+src['url']
join=True
count=int(src['m'])
source_groups[icon] = tip
elif source_data["d"]=="eD2k":
downloader=True
link_weight=0.1
tip="eD2k"
source=icon="ed2k"
url="ed2k://|file|"+f['view']['pfn']+"|"+str(f['file']['z'] if "z" in f["file"] else 1)+"|"+src['url']+"|/"
count=int(src['m'])
source_groups[icon] = tip
elif source_data["d"]=="Tiger":
link_weight=0
tip="Gnutella"
source=icon="gnutella"
part="xt=urn:tiger:"+src['url']
join=True
elif source_data["d"]=="MD5":
link_weight=0
tip="Gnutella"
source=icon="gnutella"
part="xt=urn:md5:"+src['url']
source_groups[icon] = tip
join=True
else:
continue
if source in f['view']['sources']:
view_source = f['view']['sources'][source]
else:
view_source = f['view']['sources'][source] = {}
view_source.update(source_data)
if downloader:
any_downloader = True
view_source['downloader']=1
elif not 'downloader' in view_source:
view_source['downloader']=0
view_source['tip']=tip
view_source['icon']=icon
view_source['icons']=source_data.get("icons",False)
view_source['join']=join
view_source['source']="streaming" if "s" in source_data["g"] else "direct_download" if "w" in source_data["g"] else "P2P" if "p" in source_data["g"] else ""
#so the count is not clobbered when there are several files from the same source
if not 'count' in view_source or count>0:
view_source['count']=count
if not "parts" in view_source:
view_source['parts']=[]
if not 'urls' in view_source:
view_source['urls']=[]
if part:
view_source['parts'].append(part)
if url:
if url_pattern:
view_source['urls']=[source_data["url_pattern"]%url]
f['view']['source_id']=url
view_source["pattern_used"]=True
elif not "pattern_used" in view_source:
view_source['urls'].append(url)
if source_data["d"]!="eD2k":
view_source['count']+=1
if link_weight>max_weight:
max_weight = link_weight
f['view']['source'] = source
f['view']['source_groups'] = sorted(source_groups.items())
f['view']['any_downloader'] = any_downloader
if "source" not in f["view"]:
raise FileNoSources
if icon!="web":
for src,info in f['view']['sources'].items():
if info['join']:
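# builds a magnet URI of the form
# magnet:?xt=urn:btih:<hash>&tr=<tracker>&dn=<filename>&xl=<size>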
f['view']['sources'][src]['urls'].append("magnet:?"+"&".join(info['parts'])+"&dn="+f['view']['pfn']+("&xl="+str(f['file']['z']) if 'z' in f['file'] else ""))
elif not 'urls' in info:
del(f['view']['sources'][src])
def choose_file_type(f):
'''
Chooses the file type
'''
ct, file_tags, file_format = guess_doc_content_type(f["file"], g.sources)
f['view']["ct"] = ct
f['view']['file_type'] = CONTENTS[ct].lower()
f['view']["tags"] = file_tags
if file_format: f['view']['format'] = file_format
def get_images(f):
'''
Gets the images for the files that have them
'''
images = images_id = None
if "i" in f["file"] and isinstance(f["file"]["i"],list):
images = f["file"]["i"]
images_id = f["file"]["_id"]
elif "se" in f["file"] and "info" in f["file"]["se"]:
for ntt in chain([f["file"]["se"]["info"]], f["file"]["se"]["rel"]):
if "im" in ntt:
images = ntt["im"]
images_id = "e_%d_"%int(ntt["_id"])
break
if images:
images_servers=[]
for image in images:
server=g.image_servers[image]
images_servers.append("%02d"%int(server["_id"]))
if not "first_image_server" in f["view"]:
f["view"]["first_image_server"]=server["ip"]
f["view"]["images_server"]="_".join(images_servers)
| '''
Builds the source links correctly
'''
def get_domain(src):
'''
Returns the domain of a URL
'''
url_parts=urlparse(src).netloc.split('.')
i=len(url_parts)-1
if len(url_parts[i])<=2 and len(url_parts[i-1])<=3:
return url_parts[i-2]+'.'+url_parts[i-1]+'.'+url_parts[i]
else:
return url_parts[i-1]+'.'+url_parts[i]
f['view']['action']='download'
f['view']['sources']={}
max_weight=0
icon=""
any_downloader=False
| identifier_body |
|
fill_data.py | _data["url_pattern"]%url]
f['view']['source_id']=url
view_source["pattern_used"]=True
elif not "pattern_used" in view_source:
view_source['urls'].append(url)
if source_data["d"]!="eD2k":
view_source['count']+=1
if link_weight>max_weight:
max_weight = link_weight
f['view']['source'] = source
f['view']['source_groups'] = sorted(source_groups.items())
f['view']['any_downloader'] = any_downloader
if "source" not in f["view"]:
raise FileNoSources
if icon!="web":
for src,info in f['view']['sources'].items():
if info['join']:
f['view']['sources'][src]['urls'].append("magnet:?"+"&".join(info['parts'])+"&dn="+f['view']['pfn']+("&xl="+str(f['file']['z']) if 'z' in f['file'] else ""))
elif not 'urls' in info:
del(f['view']['sources'][src])
def choose_file_type(f):
'''
Chooses the file type
'''
ct, file_tags, file_format = guess_doc_content_type(f["file"], g.sources)
f['view']["ct"] = ct
f['view']['file_type'] = CONTENTS[ct].lower()
f['view']["tags"] = file_tags
if file_format: f['view']['format'] = file_format
def get_images(f):
'''
Gets the images for the files that have them
'''
images = images_id = None
if "i" in f["file"] and isinstance(f["file"]["i"],list):
images = f["file"]["i"]
images_id = f["file"]["_id"]
elif "se" in f["file"] and "info" in f["file"]["se"]:
for ntt in chain([f["file"]["se"]["info"]], f["file"]["se"]["rel"]):
if "im" in ntt:
images = ntt["im"]
images_id = "e_%d_"%int(ntt["_id"])
break
if images:
images_servers=[]
for image in images:
server=g.image_servers[image]
images_servers.append("%02d"%int(server["_id"]))
if not "first_image_server" in f["view"]:
f["view"]["first_image_server"]=server["ip"]
f["view"]["images_server"]="_".join(images_servers)
f["view"]["images_id"] = images_id
def get_int(adict, key):
if not key in adict:
return None
value = adict[key]
if isinstance(value, (int,long)):
return value
elif isinstance(value, float):
return int(value)
elif isinstance(value, basestring):
result = None
for c in value:
digit = ord(c)-48
if 0<=digit<=9:
if result:
result *= 10
else:
result = 0
result += digit
else:
break
return result
return None
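# note: unlike int(), get_int tolerates trailing junk ('640x480' -> 640)
# and returns None when the string does not start with a digit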
def get_float(adict, key):
if not key in adict:
return None
value = adict[key]
if isinstance(value, float):
return value
elif isinstance(value, (int,long)):
return float(value)
elif isinstance(value, basestring):
result = ""
decimal = False
for c in value:
if c in "0123456789":
result += c
elif c in ".," and not decimal:
result += "."
decimal = True
else:
break
if result:
try:
return float(result)
except:
pass
return None
def format_metadata(f,text_cache, search_text_shown=False):
'''
Formats the files' metadata
'''
text = text_cache[2] if text_cache else None
view_md = f['view']['md'] = {}
view_searches = f["view"]["searches"]={}
file_type = f['view']['file_type'] if 'file_type' in f['view'] else None
if 'md' in f['file']:
#if it comes in the type:metadata format, the type prefix is stripped
file_md = {(meta.split(":")[-1] if ":" in meta else meta): value for meta, value in f['file']['md'].iteritems()}
# Duration for video and images
seconds = get_float(file_md, "seconds")
minutes = get_float(file_md, "minutes")
hours = get_float(file_md, "hours")
# If no duration was obtained another way, fall back to length and duration
if seconds==minutes==hours==None:
seconds = get_float(file_md, "length") or get_float(file_md, "duration")
duration = [hours or 0, minutes or 0, seconds or 0] # h, m, s
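# the carry loop below normalizes overflow, e.g. [0, 75, 90] -> "1:16:30"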
if any(duration):
carry = 0
for i in xrange(len(duration)-1,-1,-1):
unit = long(duration[i]) + carry
duration[i] = unit%60
carry = unit/60
view_md["length"] = "%d:%02d:%02d" % tuple(duration) if duration[0] > 0 else "%02d:%02d" % tuple(duration[1:])
# Size for videos and images
width = get_int(file_md, "width")
height = get_int(file_md, "height")
if width and height:
view_md["size"] = "%dx%dpx" % (width, height)
# Metadata passed through unchanged
try:
view_md.update(
(meta, file_md[meta]) for meta in
(
"folders","description","fileversion","os","files","pages","format",
"seeds","leechs","composer","publisher","encoding","director","writer","starring","producer","released"
) if meta in file_md
)
view_searches.update(
(meta, seoize_text(file_md[meta],"_",False)) for meta in
(
"folders","os","composer","publisher","director","writer","starring","producer"
) if meta in file_md
)
except BaseException as e:
logging.warn(e)
# thumbnail
if "thumbnail" in file_md:
f["view"]["thumbnail"] = file_md["thumbnail"]
#metadata stored under other names
try:
view_md.update(("tags", file_md[meta]) for meta in ("keywords", "tags", "tag") if meta in file_md)
if "tags" in view_md and isinstance(view_md["tags"], basestring):
view_searches["tags"] = []
view_md.update(("comments", file_md[meta]) for meta in ("comments", "comment") if meta in file_md)
view_md.update(("track", file_md[meta]) for meta in ("track", "track_number") if meta in file_md)
view_md.update(("created_by", file_md[meta]) for meta in ("created_by", "encodedby","encoder") if meta in file_md)
view_md.update(("language", file_md[meta]) for meta in ("language", "lang") if meta in file_md)
view_md.update(("date", file_md[meta]) for meta in ("published", "creationdate") if meta in file_md)
view_md.update(("trackers", "\n".join(file_md[meta].split(" "))) for meta in ("trackers", "tracker") if meta in file_md and isinstance(file_md[meta], basestring))
view_md.update(("hash", file_md[meta]) for meta in ("hash", "infohash") if meta in file_md)
view_md.update(("visualizations", file_md[meta]) for meta in ("count", "viewCount") if meta in file_md)
if "unpackedsize" in file_md:
view_md["unpacked_size"]=file_md["unpackedsize"]
if "privateflag" in file_md:
view_md["private_file"]=file_md["privateflag"]
except BaseException as e:
logging.warn(e)
#torrents -> filedir filesizes filepaths
if "filepaths" in file_md:
filepath | s = {}
for path, size in izip_longest(u(file_md["filepaths"]).split("///"), u(file_md.get("filesizes","")).split(" "), fillvalue=None):
# no permite tamaños sin fichero
if not path: break
parts = path.strip("/").split("/")
# crea subdirectorios
relative_path = filepaths
for part in parts[:-1]:
if "/"+part not in relative_path:
relative_path["/"+part] = {}
relative_path = relative_path["/"+part]
# si ya existe el directorio no hace nada
if "/"+parts[-1] in relative_path:
pass
# si el ultimo nivel se repite es un directorio (fallo de contenido)
elif parts[-1] in relative_path:
relative_path["/"+parts[-1]] = {}
del relative_path[parts[-1]] | conditional_block |
|
fill_data.py | icon] = tip
source=get_domain(src['url']) if "f" in source_data["g"] else source_data["d"]
url=src['url']
if "url_pattern" in source_data and not url.startswith(("https://","http://","ftp://")):
url_pattern=True
#en caso de duda se prefiere streaming
if "s" in source_data["g"]:
f['view']['action']="listen" if f['view']['ct']==CONTENT_AUDIO else 'watch'
link_weight*=2
#torrenthash antes de torrent porque es un caso especifico
elif source_data["d"]=="BitTorrentHash":
downloader=True
link_weight=0.9 if 'torrent:tracker' in f['file']['md'] or 'torrent:trackers' in f['file']['md'] else 0.1
tip="Torrent MagnetLink"
source="tmagnet"
icon="torrent"
if not icon in source_groups:
source_groups[icon] = tip # magnet link tiene menos prioridad para el texto
join=True
count=int(src['m'])
part="xt=urn:btih:"+src['url']
if 'torrent:tracker' in f['file']['md']:
part += unicode('&tr=' + urllib.quote_plus(u(f['file']['md']['torrent:tracker']).encode("UTF-8")), "UTF-8")
elif 'torrent:trackers' in f['file']['md']:
trackers = f['file']['md']['torrent:trackers']
if isinstance(trackers, basestring):
part += unicode("".join('&tr='+urllib.quote_plus(tr) for tr in u(trackers).encode("UTF-8").split(" ")), "UTF-8")
elif "t" in source_data["g"]:
downloader=True
link_weight=0.8
url=src['url']
if "url_pattern" in source_data and not url.startswith(("https://","http://","ftp://")):
url_pattern=True
tip=source=get_domain(source_data["url_pattern"]%url)
else:
tip=source=get_domain(src['url'])
icon="torrent"
if not icon in source_groups:
source_groups[icon] = tip
elif source_data["d"]=="Gnutella":
link_weight=0.2
tip="Gnutella"
source=icon="gnutella"
part="xt=urn:sha1:"+src['url']
join=True
count=int(src['m'])
source_groups[icon] = tip
elif source_data["d"]=="eD2k":
downloader=True
link_weight=0.1
tip="eD2k"
source=icon="ed2k"
url="ed2k://|file|"+f['view']['pfn']+"|"+str(f['file']['z'] if "z" in f["file"] else 1)+"|"+src['url']+"|/"
count=int(src['m'])
source_groups[icon] = tip
elif source_data["d"]=="Tiger":
link_weight=0
tip="Gnutella"
source=icon="gnutella"
part="xt=urn:tiger:"+src['url']
join=True
elif source_data["d"]=="MD5":
link_weight=0
tip="Gnutella"
source=icon="gnutella"
part="xt=urn:md5:"+src['url']
source_groups[icon] = tip
join=True
else:
continue
if source in f['view']['sources']:
view_source = f['view']['sources'][source]
else:
view_source = f['view']['sources'][source] = {}
view_source.update(source_data)
if downloader:
any_downloader = True
view_source['downloader']=1
elif not 'downloader' in view_source:
view_source['downloader']=0
view_source['tip']=tip
view_source['icon']=icon
view_source['icons']=source_data.get("icons",False)
view_source['join']=join
view_source['source']="streaming" if "s" in source_data["g"] else "direct_download" if "w" in source_data["g"] else "P2P" if "p" in source_data["g"] else ""
#para no machacar el numero si hay varios archivos del mismo source
if not 'count' in view_source or count>0:
view_source['count']=count
if not "parts" in view_source:
view_source['parts']=[]
if not 'urls' in view_source:
view_source['urls']=[]
if part:
view_source['parts'].append(part)
if url:
if url_pattern:
view_source['urls']=[source_data["url_pattern"]%url]
f['view']['source_id']=url
view_source["pattern_used"]=True
elif not "pattern_used" in view_source:
view_source['urls'].append(url)
if source_data["d"]!="eD2k":
view_source['count']+=1
if link_weight>max_weight:
max_weight = link_weight
f['view']['source'] = source
f['view']['source_groups'] = sorted(source_groups.items())
f['view']['any_downloader'] = any_downloader
if "source" not in f["view"]:
raise FileNoSources
if icon!="web":
for src,info in f['view']['sources'].items():
if info['join']:
f['view']['sources'][src]['urls'].append("magnet:?"+"&".join(info['parts'])+"&dn="+f['view']['pfn']+("&xl="+str(f['file']['z']) if 'z' in f['file'] else ""))
elif not 'urls' in info:
del(f['view']['sources'][src])
def choose_file_type(f):
'''
Elige el tipo de archivo
'''
ct, file_tags, file_format = guess_doc_content_type(f["file"], g.sources)
f['view']["ct"] = ct
f['view']['file_type'] = CONTENTS[ct].lower()
f['view']["tags"] = file_tags
if file_format: f['view']['format'] = file_format
def get_images(f):
'''
Obtiene las imagenes para los archivos que las tienen
'''
images = images_id = None
if "i" in f["file"] and isinstance(f["file"]["i"],list):
images = f["file"]["i"]
images_id = f["file"]["_id"]
elif "se" in f["file"] and "info" in f["file"]["se"]:
for ntt in chain([f["file"]["se"]["info"]], f["file"]["se"]["rel"]):
if "im" in ntt:
images = ntt["im"]
images_id = "e_%d_"%int(ntt["_id"])
break
if images:
images_servers=[]
for image in images:
server=g.image_servers[image]
images_servers.append("%02d"%int(server["_id"]))
if not "first_image_server" in f["view"]:
f["view"]["first_image_server"]=server["ip"]
f["view"]["images_server"]="_".join(images_servers)
f["view"]["images_id"] = images_id
def get_int(adict, key):
if not key in adict:
return None | value = adict[key]
if isinstance(value, (int,long)):
return value
elif isinstance(value, float):
return int(value)
elif isinstance(value, basestring):
result = None
for c in value:
digit = ord(c)-48
if 0<=digit<=9:
if result:
result *= 10
else:
result = 0
result += digit
else:
break
return result
return None
def get_float(adict, key):
if not key in adict:
return None
value = adict[key]
if isinstance(value, float):
return value
elif isinstance(value, (int,long)):
return float(value)
elif isinstance(value, basestring):
result = ""
decimal = False
for c in value:
if c in "0123456789":
result += c
elif c in ".," and not decimal:
result += "."
decimal = True
else:
break
if result:
try:
return float(result)
except:
pass
return None
def format_metadata(f,text_cache, search_text_shown=False):
'''
Formatea los metadatos de los archivos
'''
text = text_cache[2] if text_cache else None
view_md = f['view']['md'] = {}
view_searches = f["view"]["searches"]={}
file_type = f['view']['file_type'] if 'file_type' in f['view'] else None
if 'md' in f['file']:
#si viene con el formato tipo:metadato se le quita el tipo
file_md = {(meta.split(":")[-1] if ":" in meta else meta): value for meta, value | random_line_split |
|
main.rs | Message};
use rdkafka::producer::{BaseProducer, BaseRecord, DeliveryResult, ProducerContext};
use rdkafka::producer::{FutureProducer, FutureRecord};
use rdkafka::topic_partition_list::TopicPartitionList;
use rdkafka::util::get_rdkafka_version;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::future::Future;
use std::thread::sleep;
use std::time::Duration;
mod avro_encode;
struct CustomContext;
impl ClientContext for CustomContext {}
impl ConsumerContext for CustomContext {
fn pre_rebalance(&self, rebalance: &Rebalance) {
info!("Pre rebalance {:?}", rebalance);
}
fn post_rebalance(&self, rebalance: &Rebalance) {
info!("Post rebalance {:?}", rebalance);
}
fn commit_callback(&self, result: KafkaResult<()>, _offsets: &TopicPartitionList) {
info!("Committing offsets: {:?}", result);
}
}
type LoggingConsumer = StreamConsumer<CustomContext>;
#[tokio::main]
async fn main() {
let matches = App::new("consumer example")
.version(option_env!("CARGO_PKG_VERSION").unwrap_or(""))
.about("Simple command line consumer")
.arg(
Arg::with_name("brokers")
.short("b")
.long("brokers")
.help("Broker list in kafka format")
.takes_value(true)
.default_value("localhost:9092"),
)
.arg(
Arg::with_name("group-id")
.short("g")
.long("group-id")
.help("Consumer group id")
.takes_value(true)
.default_value("example_consumer_group_id"),
)
.arg(
Arg::with_name("log-conf")
.long("log-conf")
.help("Configure the logging format (example: 'rdkafka=trace')")
.takes_value(true),
)
.arg(
Arg::with_name("topics")
.short("t")
.long("topics")
.help("Topic list")
.takes_value(true)
.multiple(true)
.required(true),
)
.get_matches();
// setup_logger(true, matches.value_of("log-conf"));
log4rs::init_file("log4rs.yml", Default::default())
.expect("'log4rs.yml' not found. Required for logging.");
// let (version_n, version_s) = get_rdkafka_version();
// info!("rd_kafka_version: 0x{:08x}, {}", version_n, version_s);
let brokers = matches.value_of("brokers").unwrap();
let topics = matches.values_of("topics").unwrap().collect::<Vec<&str>>();
let group_id = matches.value_of("group-id").unwrap();
info!("Brokers: ({:?})", brokers);
info!("Topics: ({:?})", topics);
info!("Group Id: ({:?})", group_id);
// This sends over the schema in the payload
// let payload = serialize().unwrap();
// publish(brokers, topics[0], &payload).await;
// This uses `encode` to send a minimal payload
let payload = serialize2().unwrap();
publish(brokers, topics[0], &payload).await;
// Code for consumer
// let context = CustomContext;
// let consumer = get_consumer(context, brokers, group_id, &topics);
// process_message_stream(&consumer).await;
}
async fn publish(brokers: &str, topic_name: &str, payload: &[u8]) {
let producer: FutureProducer = ClientConfig::new()
.set("bootstrap.servers", brokers)
.set("message.timeout.ms", "5000")
.create()
.expect("Producer creation error");
let res = producer
.send(
FutureRecord::to(topic_name)
.payload(payload)
.key(&format!("Key1"))
.headers(OwnedHeaders::new().add("header_key", "header_value")),
0,
)
.await;
info!("Future completed. Result: {:?}", res);
}
fn get_consumer<'a>(
context: CustomContext,
brokers: &str,
group_id: &str,
topics: &[&str],
) -> LoggingConsumer {
let consumer: LoggingConsumer = ClientConfig::new()
.set("group.id", group_id)
.set("bootstrap.servers", brokers)
.set("enable.partition.eof", "false")
.set("session.timeout.ms", "6000")
.set("enable.auto.commit", "true")
//.set("statistics.interval.ms", "30000")
//.set("auto.offset.reset", "smallest")
.set_log_level(RDKafkaLogLevel::Debug)
.create_with_context(context)
.expect("Consumer creation failed");
consumer
.subscribe(&topics.to_vec())
.expect("Can't subscribe to specified topics");
consumer
}
fn get_schema() -> Schema {
let schema = r#"{
"type": "record",
"name": "envelope",
"fields": [
{
"name": "before",
"type": [
"null",
{
"type": "record",
"name": "row",
"fields": [
{"name": "FirstName", "type": "string"},
{"name": "LastName", "type": "string"}
]
}
]
},
{
"name": "after",
"type": [
"null",
{
"type": "record",
"name": "row",
"fields": [
{"name": "FirstName", "type": "string"},
{"name": "LastName", "type": "string"}
]
}
]
}
]
}"#;
// parse_schema(schema).unwrap()
Schema::parse_str(schema).unwrap()
}
fn serialize() -> Result<Vec<u8>, Error> {
let schema = get_schema();
let mut writer = Writer::with_codec(&schema, Vec::new(), Codec::Null);
let mut record = Value::Record(vec![
(
"before".to_string(),
Value::Union(Box::new(Value::Record(vec![
("FirstName".to_string(), Value::String("Pink".to_string())),
(
"LastName".to_string(),
Value::String("Elephants".to_string()),
),
]))),
),
(
"after".to_string(),
Value::Union(Box::new(Value::Record(vec![
(
"FirstName".to_string(),
Value::String("Donnatella".to_string()),
),
("LastName".to_string(), Value::String("Moss".to_string())),
]))),
),
]);
writer.append(record)?;
writer.flush()?;
let input = writer.into_inner();
//add header info: '01111'
// required for https://github.com/MaterializeInc/materialize/blob/master/src/interchange/avro.rs#L394
let body = [b'O', b'1', b'1', b'1', b'1'].to_vec();
let output = [&body[..], &input[..]].concat();
Ok(output)
}
fn serialize2() -> Result<Vec<u8>, Error> | ]))),
),
]);
//add header info: '01111'
// required for https://github.com/MaterializeInc/materialize/blob/master/src/interchange/avro.rs#L394
let mut body = [b'O', b'1', b'1', b'1', b'1'].to_vec();
encode(&record, &schema, &mut body);
Ok(body)
}
fn deserialize(bytes: &Vec<u8>) -> Result<String, Error> {
let schema = get_schema();
let out = match avro_rs::Reader::with_schema(&schema, &bytes[..]) {
Ok(reader) => {
let value = reader.map(|v| format!("{:?}", v)).collect::<Vec<String>>();
// let value =
// avro_rs::from_avro_datum(&schema, &mut bytes.clone(), Some(&schema));
format!("Value: {:?}", value)
// format!("{:?}", decode(&schema, &mut s.clone()))
}
Err(e) => {
println!("Reader ERROR: {:?}", e);
"".to_string()
}
};
Ok(out)
}
#[test]
fn serialize_deserialize() {
// let schema = get_schema();
// println!("Schema: {:?}", schema);
// panic!("forced panic");
let bytes = serialize2().unwrap();
println!(
"in bytes: len {:?}, \n {:?}, \n | {
let schema = get_schema();
let mut writer = Writer::new(&schema, Vec::new());
let record = Value::Record(vec![
(
"before".to_string(),
Value::Union(Box::new(Value::Record(vec![
("FirstName".to_string(), Value::String("Greg".to_string())),
("LastName".to_string(), Value::String("Berns".to_string())),
]))),
),
(
"after".to_string(),
Value::Union(Box::new(Value::Record(vec![
(
"FirstName".to_string(),
Value::String("Hilbert".to_string()),
),
("LastName".to_string(), Value::String("McDugal".to_string())), | identifier_body |
main.rs | Message};
use rdkafka::producer::{BaseProducer, BaseRecord, DeliveryResult, ProducerContext};
use rdkafka::producer::{FutureProducer, FutureRecord};
use rdkafka::topic_partition_list::TopicPartitionList;
use rdkafka::util::get_rdkafka_version;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::future::Future;
use std::thread::sleep;
use std::time::Duration;
mod avro_encode;
struct CustomContext;
impl ClientContext for CustomContext {}
impl ConsumerContext for CustomContext {
fn pre_rebalance(&self, rebalance: &Rebalance) {
info!("Pre rebalance {:?}", rebalance);
}
fn post_rebalance(&self, rebalance: &Rebalance) {
info!("Post rebalance {:?}", rebalance);
}
fn commit_callback(&self, result: KafkaResult<()>, _offsets: &TopicPartitionList) {
info!("Committing offsets: {:?}", result);
}
}
type LoggingConsumer = StreamConsumer<CustomContext>;
#[tokio::main]
async fn | () {
let matches = App::new("consumer example")
.version(option_env!("CARGO_PKG_VERSION").unwrap_or(""))
.about("Simple command line consumer")
.arg(
Arg::with_name("brokers")
.short("b")
.long("brokers")
.help("Broker list in kafka format")
.takes_value(true)
.default_value("localhost:9092"),
)
.arg(
Arg::with_name("group-id")
.short("g")
.long("group-id")
.help("Consumer group id")
.takes_value(true)
.default_value("example_consumer_group_id"),
)
.arg(
Arg::with_name("log-conf")
.long("log-conf")
.help("Configure the logging format (example: 'rdkafka=trace')")
.takes_value(true),
)
.arg(
Arg::with_name("topics")
.short("t")
.long("topics")
.help("Topic list")
.takes_value(true)
.multiple(true)
.required(true),
)
.get_matches();
// setup_logger(true, matches.value_of("log-conf"));
log4rs::init_file("log4rs.yml", Default::default())
.expect("'log4rs.yml' not found. Required for logging.");
// let (version_n, version_s) = get_rdkafka_version();
// info!("rd_kafka_version: 0x{:08x}, {}", version_n, version_s);
let brokers = matches.value_of("brokers").unwrap();
let topics = matches.values_of("topics").unwrap().collect::<Vec<&str>>();
let group_id = matches.value_of("group-id").unwrap();
info!("Brokers: ({:?})", brokers);
info!("Topics: ({:?})", topics);
info!("Group Id: ({:?})", group_id);
// This sends over the schema in the payload
// let payload = serialize().unwrap();
// publish(brokers, topics[0], &payload).await;
// This uses `encode` to send a minimal payload
let payload = serialize2().unwrap();
publish(brokers, topics[0], &payload).await;
// Code for consumer
// let context = CustomContext;
// let consumer = get_consumer(context, brokers, group_id, &topics);
// process_message_stream(&consumer).await;
}
async fn publish(brokers: &str, topic_name: &str, payload: &[u8]) {
let producer: FutureProducer = ClientConfig::new()
.set("bootstrap.servers", brokers)
.set("message.timeout.ms", "5000")
.create()
.expect("Producer creation error");
let res = producer
.send(
FutureRecord::to(topic_name)
.payload(payload)
.key(&format!("Key1"))
.headers(OwnedHeaders::new().add("header_key", "header_value")),
0,
)
.await;
info!("Future completed. Result: {:?}", res);
}
fn get_consumer<'a>(
context: CustomContext,
brokers: &str,
group_id: &str,
topics: &[&str],
) -> LoggingConsumer {
let consumer: LoggingConsumer = ClientConfig::new()
.set("group.id", group_id)
.set("bootstrap.servers", brokers)
.set("enable.partition.eof", "false")
.set("session.timeout.ms", "6000")
.set("enable.auto.commit", "true")
//.set("statistics.interval.ms", "30000")
//.set("auto.offset.reset", "smallest")
.set_log_level(RDKafkaLogLevel::Debug)
.create_with_context(context)
.expect("Consumer creation failed");
consumer
.subscribe(&topics.to_vec())
.expect("Can't subscribe to specified topics");
consumer
}
fn get_schema() -> Schema {
let schema = r#"{
"type": "record",
"name": "envelope",
"fields": [
{
"name": "before",
"type": [
"null",
{
"type": "record",
"name": "row",
"fields": [
{"name": "FirstName", "type": "string"},
{"name": "LastName", "type": "string"}
]
}
]
},
{
"name": "after",
"type": [
"null",
{
"type": "record",
"name": "row",
"fields": [
{"name": "FirstName", "type": "string"},
{"name": "LastName", "type": "string"}
]
}
]
}
]
}"#;
// parse_schema(schema).unwrap()
Schema::parse_str(schema).unwrap()
}
fn serialize() -> Result<Vec<u8>, Error> {
let schema = get_schema();
let mut writer = Writer::with_codec(&schema, Vec::new(), Codec::Null);
let mut record = Value::Record(vec![
(
"before".to_string(),
Value::Union(Box::new(Value::Record(vec![
("FirstName".to_string(), Value::String("Pink".to_string())),
(
"LastName".to_string(),
Value::String("Elephants".to_string()),
),
]))),
),
(
"after".to_string(),
Value::Union(Box::new(Value::Record(vec![
(
"FirstName".to_string(),
Value::String("Donnatella".to_string()),
),
("LastName".to_string(), Value::String("Moss".to_string())),
]))),
),
]);
writer.append(record)?;
writer.flush()?;
let input = writer.into_inner();
//add header info: '01111'
// required for https://github.com/MaterializeInc/materialize/blob/master/src/interchange/avro.rs#L394
let body = [b'O', b'1', b'1', b'1', b'1'].to_vec();
let output = [&body[..], &input[..]].concat();
Ok(output)
}
fn serialize2() -> Result<Vec<u8>, Error> {
let schema = get_schema();
let mut writer = Writer::new(&schema, Vec::new());
let record = Value::Record(vec![
(
"before".to_string(),
Value::Union(Box::new(Value::Record(vec![
("FirstName".to_string(), Value::String("Greg".to_string())),
("LastName".to_string(), Value::String("Berns".to_string())),
]))),
),
(
"after".to_string(),
Value::Union(Box::new(Value::Record(vec![
(
"FirstName".to_string(),
Value::String("Hilbert".to_string()),
),
("LastName".to_string(), Value::String("McDugal".to_string())),
]))),
),
]);
//add header info: '01111'
// required for https://github.com/MaterializeInc/materialize/blob/master/src/interchange/avro.rs#L394
let mut body = [b'O', b'1', b'1', b'1', b'1'].to_vec();
encode(&record, &schema, &mut body);
Ok(body)
}
fn deserialize(bytes: &Vec<u8>) -> Result<String, Error> {
let schema = get_schema();
let out = match avro_rs::Reader::with_schema(&schema, &bytes[..]) {
Ok(reader) => {
let value = reader.map(|v| format!("{:?}", v)).collect::<Vec<String>>();
// let value =
// avro_rs::from_avro_datum(&schema, &mut bytes.clone(), Some(&schema));
format!("Value: {:?}", value)
// format!("{:?}", decode(&schema, &mut s.clone()))
}
Err(e) => {
println!("Reader ERROR: {:?}", e);
"".to_string()
}
};
Ok(out)
}
#[test]
fn serialize_deserialize() {
// let schema = get_schema();
// println!("Schema: {:?}", schema);
// panic!("forced panic");
let bytes = serialize2().unwrap();
println!(
"in bytes: len {:?}, \n {:?}, \n | main | identifier_name |
main.rs | , Message};
use rdkafka::producer::{BaseProducer, BaseRecord, DeliveryResult, ProducerContext};
use rdkafka::producer::{FutureProducer, FutureRecord};
use rdkafka::topic_partition_list::TopicPartitionList;
use rdkafka::util::get_rdkafka_version;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::future::Future;
use std::thread::sleep;
use std::time::Duration;
mod avro_encode;
struct CustomContext;
impl ClientContext for CustomContext {}
impl ConsumerContext for CustomContext {
fn pre_rebalance(&self, rebalance: &Rebalance) {
info!("Pre rebalance {:?}", rebalance);
}
fn post_rebalance(&self, rebalance: &Rebalance) {
info!("Post rebalance {:?}", rebalance);
}
fn commit_callback(&self, result: KafkaResult<()>, _offsets: &TopicPartitionList) {
info!("Committing offsets: {:?}", result);
}
}
type LoggingConsumer = StreamConsumer<CustomContext>;
#[tokio::main]
async fn main() {
let matches = App::new("consumer example")
.version(option_env!("CARGO_PKG_VERSION").unwrap_or(""))
.about("Simple command line consumer")
.arg(
Arg::with_name("brokers")
.short("b")
.long("brokers")
.help("Broker list in kafka format")
.takes_value(true)
.default_value("localhost:9092"),
)
.arg(
Arg::with_name("group-id")
.short("g")
.long("group-id")
.help("Consumer group id")
.takes_value(true)
.default_value("example_consumer_group_id"),
)
.arg(
Arg::with_name("log-conf")
.long("log-conf")
.help("Configure the logging format (example: 'rdkafka=trace')")
.takes_value(true),
)
.arg(
Arg::with_name("topics")
.short("t")
.long("topics")
.help("Topic list")
.takes_value(true)
.multiple(true)
.required(true),
)
.get_matches();
// setup_logger(true, matches.value_of("log-conf"));
log4rs::init_file("log4rs.yml", Default::default())
.expect("'log4rs.yml' not found. Required for logging.");
// let (version_n, version_s) = get_rdkafka_version();
// info!("rd_kafka_version: 0x{:08x}, {}", version_n, version_s);
let brokers = matches.value_of("brokers").unwrap();
let topics = matches.values_of("topics").unwrap().collect::<Vec<&str>>();
let group_id = matches.value_of("group-id").unwrap();
info!("Brokers: ({:?})", brokers);
info!("Topics: ({:?})", topics);
info!("Group Id: ({:?})", group_id);
// This sends over the schema in the payload
// let payload = serialize().unwrap();
// publish(brokers, topics[0], &payload).await;
// This uses `encode` to send a minimal payload
let payload = serialize2().unwrap();
publish(brokers, topics[0], &payload).await;
// Code for consumer
// let context = CustomContext;
// let consumer = get_consumer(context, brokers, group_id, &topics);
// process_message_stream(&consumer).await;
}
async fn publish(brokers: &str, topic_name: &str, payload: &[u8]) {
let producer: FutureProducer = ClientConfig::new()
.set("bootstrap.servers", brokers)
.set("message.timeout.ms", "5000")
.create()
.expect("Producer creation error");
let res = producer
.send(
FutureRecord::to(topic_name)
.payload(payload)
.key(&format!("Key1"))
.headers(OwnedHeaders::new().add("header_key", "header_value")),
0,
)
.await;
info!("Future completed. Result: {:?}", res);
}
fn get_consumer<'a>(
context: CustomContext,
brokers: &str,
group_id: &str,
topics: &[&str],
) -> LoggingConsumer {
let consumer: LoggingConsumer = ClientConfig::new()
.set("group.id", group_id)
.set("bootstrap.servers", brokers)
.set("enable.partition.eof", "false")
.set("session.timeout.ms", "6000")
.set("enable.auto.commit", "true")
//.set("statistics.interval.ms", "30000")
//.set("auto.offset.reset", "smallest")
.set_log_level(RDKafkaLogLevel::Debug)
.create_with_context(context)
.expect("Consumer creation failed");
consumer
.subscribe(&topics.to_vec())
.expect("Can't subscribe to specified topics");
consumer
}
fn get_schema() -> Schema {
let schema = r#"{
"type": "record",
"name": "envelope",
"fields": [
{
"name": "before",
"type": [
"null",
{
"type": "record",
"name": "row",
"fields": [
{"name": "FirstName", "type": "string"},
{"name": "LastName", "type": "string"}
]
}
]
},
{
"name": "after",
"type": [
"null",
{
"type": "record",
"name": "row",
"fields": [
{"name": "FirstName", "type": "string"},
{"name": "LastName", "type": "string"}
]
}
]
}
]
}"#;
// parse_schema(schema).unwrap()
Schema::parse_str(schema).unwrap()
}
fn serialize() -> Result<Vec<u8>, Error> {
let schema = get_schema();
let mut writer = Writer::with_codec(&schema, Vec::new(), Codec::Null);
let mut record = Value::Record(vec![
(
"before".to_string(),
Value::Union(Box::new(Value::Record(vec![
("FirstName".to_string(), Value::String("Pink".to_string())),
(
"LastName".to_string(),
Value::String("Elephants".to_string()),
),
]))),
),
(
"after".to_string(),
Value::Union(Box::new(Value::Record(vec![
(
"FirstName".to_string(),
Value::String("Donnatella".to_string()),
),
("LastName".to_string(), Value::String("Moss".to_string())),
]))),
),
]);
writer.append(record)?;
writer.flush()?; | let input = writer.into_inner();
//add header info: '01111'
// required for https://github.com/MaterializeInc/materialize/blob/master/src/interchange/avro.rs#L394
let body = [b'O', b'1', b'1', b'1', b'1'].to_vec();
let output = [&body[..], &input[..]].concat();
Ok(output)
}
fn serialize2() -> Result<Vec<u8>, Error> {
let schema = get_schema();
let mut writer = Writer::new(&schema, Vec::new());
let record = Value::Record(vec![
(
"before".to_string(),
Value::Union(Box::new(Value::Record(vec![
("FirstName".to_string(), Value::String("Greg".to_string())),
("LastName".to_string(), Value::String("Berns".to_string())),
]))),
),
(
"after".to_string(),
Value::Union(Box::new(Value::Record(vec![
(
"FirstName".to_string(),
Value::String("Hilbert".to_string()),
),
("LastName".to_string(), Value::String("McDugal".to_string())),
]))),
),
]);
//add header info: '01111'
// required for https://github.com/MaterializeInc/materialize/blob/master/src/interchange/avro.rs#L394
let mut body = [b'O', b'1', b'1', b'1', b'1'].to_vec();
encode(&record, &schema, &mut body);
Ok(body)
}
fn deserialize(bytes: &Vec<u8>) -> Result<String, Error> {
let schema = get_schema();
let out = match avro_rs::Reader::with_schema(&schema, &bytes[..]) {
Ok(reader) => {
let value = reader.map(|v| format!("{:?}", v)).collect::<Vec<String>>();
// let value =
// avro_rs::from_avro_datum(&schema, &mut bytes.clone(), Some(&schema));
format!("Value: {:?}", value)
// format!("{:?}", decode(&schema, &mut s.clone()))
}
Err(e) => {
println!("Reader ERROR: {:?}", e);
"".to_string()
}
};
Ok(out)
}
#[test]
fn serialize_deserialize() {
// let schema = get_schema();
// println!("Schema: {:?}", schema);
// panic!("forced panic");
let bytes = serialize2().unwrap();
println!(
"in bytes: len {:?}, \n {:?}, \n | random_line_split |
|
logg.go | highlightMx.Lock()
highlighted[hl] = struct{}{}
highlightMx.Unlock()
return struct{}{}
}
// _isSubsystemFiltered returns true if the subsystem should not pr logs
func _isSubsystemFiltered(subsystem string) (found bool) {
_logFilterMx.Lock()
_, found = logFilter[subsystem]
_logFilterMx.Unlock()
return
}
// AddFilteredSubsystem adds a new subsystem Name to the highlighted list
func AddFilteredSubsystem(hl string) struct{} {
_logFilterMx.Lock()
logFilter[hl] = struct{}{}
_logFilterMx.Unlock()
return struct{}{}
}
func getTimeText(level int32) string {
// since := time.Now().Sub(logger_started).Round(time.Millisecond).String()
// diff := 12 - len(since)
// if diff > 0 {
// since = strings.Repeat(" ", diff) + since + " "
// }
return color.Bit24(99, 99, 99, false).Sprint(time.Now().
Format(time.StampMilli))
}
func _ln(level int32, subsystem string) func(a ...interface{}) {
return func(a ...interface{}) {
if level <= currentLevel.Load() && !_isSubsystemFiltered(subsystem) {
printer := fmt.Sprintf
if _isHighlighted(subsystem) {
printer = color.Bold.Sprintf
}
fmt.Fprintf(
writer,
printer(
"%-58v%s%s%-6v %s\n",
getLoc(2, level, subsystem),
getTimeText(level),
color.Bit24(20, 20, 20, true).
Sprint(AppColorizer(" "+App)),
LevelSpecs[level].Colorizer(
color.Bit24(20, 20, 20, true).
Sprint(" "+LevelSpecs[level].Name+" "),
),
AppColorizer(joinStrings(" ", a...)),
),
)
}
}
}
func _f(level int32, subsystem string) func(format string, a ...interface{}) {
return func(format string, a ...interface{}) {
if level <= currentLevel.Load() && !_isSubsystemFiltered(subsystem) {
printer := fmt.Sprintf
if _isHighlighted(subsystem) {
printer = color.Bold.Sprintf
}
fmt.Fprintf(
writer,
printer(
"%-58v%s%s%-6v %s\n",
getLoc(2, level, subsystem),
getTimeText(level),
color.Bit24(20, 20, 20, true).
Sprint(AppColorizer(" "+App)),
LevelSpecs[level].Colorizer(
color.Bit24(20, 20, 20, true).
Sprint(" "+LevelSpecs[level].Name+" "),
),
AppColorizer(fmt.Sprintf(format, a...)),
),
)
}
}
}
func _s(level int32, subsystem string) func(a ...interface{}) {
return func(a ...interface{}) {
if level <= currentLevel.Load() && !_isSubsystemFiltered(subsystem) {
printer := fmt.Sprintf
if _isHighlighted(subsystem) {
printer = color.Bold.Sprintf
}
fmt.Fprintf(
writer,
printer(
"%-58v%s%s%s%s%s\n",
getLoc(2, level, subsystem),
getTimeText(level),
color.Bit24(20, 20, 20, true).
Sprint(AppColorizer(" "+App)),
LevelSpecs[level].Colorizer(
color.Bit24(20, 20, 20, true).
Sprint(" "+LevelSpecs[level].Name+" "),
),
AppColorizer(
" spew:",
),
fmt.Sprint(
color.Bit24(20, 20, 20, true).Sprint("\n\n"+spew.Sdump(a)),
"\n",
),
),
)
}
}
}
func _c(level int32, subsystem string) func(closure func() string) {
return func(closure func() string) {
if level <= currentLevel.Load() && !_isSubsystemFiltered(subsystem) {
printer := fmt.Sprintf
if _isHighlighted(subsystem) {
printer = color.Bold.Sprintf
}
fmt.Fprintf(
writer,
printer(
"%-58v%s%s%-6v %s\n",
getLoc(2, level, subsystem),
getTimeText(level),
color.Bit24(20, 20, 20, true).
Sprint(AppColorizer(" "+App)),
LevelSpecs[level].Colorizer(
color.Bit24(20, 20, 20, true).
Sprint(" "+LevelSpecs[level].Name+" "),
),
AppColorizer(closure()),
),
)
}
}
}
func _chk(level int32, subsystem string) func(e error) bool {
return func(e error) bool {
if level <= currentLevel.Load() && !_isSubsystemFiltered(subsystem) {
if e != nil {
printer := fmt.Sprintf
if _isHighlighted(subsystem) {
printer = color.Bold.Sprintf
}
fmt.Fprintf(
writer,
printer(
"%-58v%s%s%-6v %s\n",
getLoc(2, level, subsystem),
getTimeText(level),
color.Bit24(20, 20, 20, true).
Sprint(AppColorizer(" "+App)),
LevelSpecs[level].Colorizer(
color.Bit24(20, 20, 20, true).
Sprint(" "+LevelSpecs[level].Name+" "),
),
LevelSpecs[level].Colorizer(joinStrings(" ", e.Error())),
),
)
return true
}
}
return false
}
}
// joinStrings constructs a string from an slice of interface same as Println but
// without the terminal newline
func joinStrings(sep string, a ...interface{}) (o string) {
for i := range a {
o += fmt.Sprint(a[i])
if i < len(a)-1 {
o += sep
}
}
return
}
// getLoc calls runtime.Caller and formats as expected by source code editors
// for terminal hyperlinks
//
// Regular expressions and the substitution texts to make these clickable in
// Tilix and other RE hyperlink configurable terminal emulators:
//
// This matches the shortened paths generated in this command and printed at
// the very beginning of the line as this logger prints:
//
// ^((([\/a-zA-Z@0-9-_.]+/)+([a-zA-Z@0-9-_.]+)):([0-9]+))
//
// goland --line $5 $GOPATH/src/github.com/p9c/matrjoska/$2
//
// I have used a shell variable there but tilix doesn't expand them,
// so put your GOPATH in manually, and obviously change the repo subpath.
//
//
// Change the path to use with another repository's logging output (
// someone with more time on their hands could probably come up with
// something, but frankly the custom links feature of Tilix has the absolute
// worst UX I have encountered since the 90s...
// Maybe in the future this library will be expanded with a tool that more
// intelligently sets the path, ie from CWD or other cleverness.
//
// This matches full paths anywhere on the commandline delimited by spaces:
//
// ([/](([\/a-zA-Z@0-9-_.]+/)+([a-zA-Z@0-9-_.]+)):([0-9]+))
//
// goland --line $5 /$2
//
// Adapt the invocation to open your preferred editor if it has the capability,
// the above is for Jetbrains Goland
//
func getLoc(skip int, level int32, subsystem string) (output string) {
_, file, line, _ := runtime.Caller(skip)
defer func() {
if r := recover(); r != nil {
fmt.Fprintln(os.Stderr, "getloc panic on subsystem", subsystem, file)
}
}()
split := strings.Split(file, subsystem)
if len(split) < 2 {
output = fmt.Sprint(
color.White.Sprint(subsystem),
color.Gray.Sprint(
file, ":", line,
),
)
} else {
output = fmt.Sprint(
color.White.Sprint(subsystem),
color.Gray.Sprint(
split[1], ":", line,
),
)
}
return
}
// DirectionString is a helper function that returns a string that represents the direction of a connection (inbound or outbound).
func DirectionString(inbound bool) string {
if inbound {
return "inbound"
}
return "outbound"
}
func PickNoun(n int, singular, plural string) string {
if n == 1 {
return singular
}
return plural
}
func FileExists(filePath string) bool {
_, e := os.Stat(filePath) | return e == nil
} | random_line_split |
|
logg.go | , false).Sprintf},
{logLevels.Error, "error", color.Bit24(255, 0, 0, false).Sprintf},
{logLevels.Check, "check", color.Bit24(255, 255, 0, false).Sprintf},
{logLevels.Warn, "warn ", color.Bit24(0, 255, 0, false).Sprintf},
{logLevels.Info, "info ", color.Bit24(255, 255, 0, false).Sprintf},
{logLevels.Debug, "debug", color.Bit24(0, 128, 255, false).Sprintf},
{logLevels.Trace, "trace", color.Bit24(128, 0, 255, false).Sprintf},
}
Levels = []string{
Off,
Fatal,
Error,
Check,
Warn,
Info,
Debug,
Trace,
}
LogChanDisabled = uberatomic.NewBool(true)
LogChan chan Entry
)
const (
Off = "off"
Fatal = "fatal"
Error = "error"
Warn = "warn"
Info = "info"
Check = "check"
Debug = "debug"
Trace = "trace"
)
// AddLogChan adds a channel that log entries are sent to
func AddLogChan() (ch chan Entry) {
LogChanDisabled.Store(false)
if LogChan != nil {
panic("warning warning")
}
// L.Writer.Write.Store( false
LogChan = make(chan Entry)
return LogChan
}
// GetLogPrinterSet returns a set of LevelPrinter with their subsystem preloaded
func GetLogPrinterSet(subsystem string) (Fatal, Error, Warn, Info, Debug, Trace LevelPrinter) {
return _getOnePrinter(_Fatal, subsystem),
_getOnePrinter(_Error, subsystem),
_getOnePrinter(_Warn, subsystem),
_getOnePrinter(_Info, subsystem),
_getOnePrinter(_Debug, subsystem),
_getOnePrinter(_Trace, subsystem)
}
func _getOnePrinter(level int32, subsystem string) LevelPrinter {
return LevelPrinter{
Ln: _ln(level, subsystem),
F: _f(level, subsystem),
S: _s(level, subsystem),
C: _c(level, subsystem),
Chk: _chk(level, subsystem),
}
}
// SetLogLevel sets the log level via a string, which can be truncated down to
// one character, similar to nmcli's argument processor, as the first letter is
// unique. This could be used with a linter to make larger command sets.
func SetLogLevel(l string) {
if l == "" {
l = "info"
}
// fmt.Fprintln(os.Stderr, "setting log level", l)
lvl := logLevels.Info
for i := range LevelSpecs {
if LevelSpecs[i].Name[:1] == l[:1] {
lvl = LevelSpecs[i].ID
}
}
currentLevel.Store(lvl)
}
// SetLogWriter atomically changes the log io.Writer interface
func SetLogWriter(wr io.Writer) {
// w := unsafe.Pointer(writer)
// c := unsafe.Pointer(wr)
// atomic.SwapPointer(&w, c)
writer = wr
}
func SetLogWriteToFile(path, appName string) (e error) {
// copy existing log file to dated log file as we will truncate it per
// session
path = filepath.Join(path, "log"+appName)
if _, e = os.Stat(path); e == nil {
var b []byte
b, e = ioutil.ReadFile(path)
if e == nil {
ioutil.WriteFile(path+fmt.Sprint(time.Now().Unix()), b, 0600)
}
}
var fileWriter *os.File
if fileWriter, e = os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC,
0600); e != nil {
fmt.Fprintln(os.Stderr, "unable to write log to", path, "error:", e)
return
}
mw := io.MultiWriter(os.Stderr, fileWriter)
fileWriter.Write([]byte("logging to file '" + path + "'\n"))
mw.Write([]byte("logging to file '" + path + "'\n"))
SetLogWriter(mw)
return
}
// SortSubsystemsList sorts the list of subsystems, to keep the data read-only,
// call this function right at the top of the main, which runs after
// declarations and main/init. Really this is just here to alert the reader.
func SortSubsystemsList() {
sort.Strings(allSubsystems)
// fmt.Fprintln(
// os.Stderr,
// spew.Sdump(allSubsystems),
// spew.Sdump(highlighted),
// spew.Sdump(logFilter),
// )
}
// AddLoggerSubsystem adds a subsystem to the list of known subsystems and returns the
// string so it is nice and neat in the package logg.go file
func AddLoggerSubsystem(pathBase string) (subsystem string) {
// var split []string
var ok bool
var file string
_, file, _, ok = runtime.Caller(1)
if ok {
r := strings.Split(file, pathBase)
// fmt.Fprintln(os.Stderr, version.PathBase, r)
fromRoot := filepath.Base(file)
if len(r) > 1 {
fromRoot = r[1]
}
split := strings.Split(fromRoot, "/")
// fmt.Fprintln(os.Stderr, version.PathBase, "file", file, r, fromRoot, split)
subsystem = strings.Join(split[:len(split)-1], "/")
// fmt.Fprintln(os.Stderr, "adding subsystem", subsystem)
allSubsystems = append(allSubsystems, subsystem)
}
return
}
// StoreHighlightedSubsystems sets the list of subsystems to highlight
func StoreHighlightedSubsystems(highlights []string) (found bool) {
highlightMx.Lock()
highlighted = make(map[string]struct{}, len(highlights))
for i := range highlights {
highlighted[highlights[i]] = struct{}{}
}
highlightMx.Unlock()
return
}
// LoadHighlightedSubsystems returns a copy of the map of highlighted subsystems
func LoadHighlightedSubsystems() (o []string) {
highlightMx.Lock()
o = make([]string, len(logFilter))
var counter int
for i := range logFilter {
o[counter] = i
counter++
}
highlightMx.Unlock()
sort.Strings(o)
return
}
// StoreSubsystemFilter sets the list of subsystems to filter
func StoreSubsystemFilter(filter []string) {
_logFilterMx.Lock()
logFilter = make(map[string]struct{}, len(filter))
for i := range filter {
logFilter[filter[i]] = struct{}{}
}
_logFilterMx.Unlock()
}
// LoadSubsystemFilter returns a copy of the map of filtered subsystems
func LoadSubsystemFilter() (o []string) {
_logFilterMx.Lock()
o = make([]string, len(logFilter))
var counter int
for i := range logFilter {
o[counter] = i
counter++
}
_logFilterMx.Unlock()
sort.Strings(o)
return
}
// _isHighlighted returns true if the subsystem is in the list to have attention
// getters added to them
func _isHighlighted(subsystem string) (found bool) {
highlightMx.Lock()
_, found = highlighted[subsystem]
highlightMx.Unlock()
return
}
// AddHighlightedSubsystem adds a new subsystem Name to the highlighted list
func AddHighlightedSubsystem(hl string) struct{} {
highlightMx.Lock()
highlighted[hl] = struct{}{}
highlightMx.Unlock()
return struct{}{}
}
// _isSubsystemFiltered returns true if the subsystem should not pr logs
func _isSubsystemFiltered(subsystem string) (found bool) {
_logFilterMx.Lock()
_, found = logFilter[subsystem]
_logFilterMx.Unlock()
return
}
// AddFilteredSubsystem adds a new subsystem Name to the highlighted list
func | (hl string) struct{} {
_logFilterMx.Lock()
logFilter[hl] = struct{}{}
_logFilterMx.Unlock()
return struct{}{}
}
func getTimeText(level int32) string {
// since := time.Now().Sub(logger_started).Round(time.Millisecond).String()
// diff := 12 - len(since)
// if diff > 0 {
// since = strings.Repeat(" ", diff) + since + " "
// }
return color.Bit24(99, 99, 99, false).Sprint(time.Now().
Format(time.StampMilli))
}
func _ln(level int32, subsystem string) func(a ...interface{}) {
return func(a ...interface{}) {
if level <= currentLevel.Load() && !_isSubsystemFiltered(subsystem) {
printer := fmt.Sprintf
if _isHighlighted(subsystem) {
printer = color.Bold.Sprintf
}
fmt.Fprintf(
writer,
printer(
"%-58v%s%s%-6v %s\n",
getLoc(2, level, subsystem),
get | AddFilteredSubsystem | identifier_name |
logg.go | Writer(mw)
return
}
// SortSubsystemsList sorts the list of subsystems, to keep the data read-only,
// call this function right at the top of the main, which runs after
// declarations and main/init. Really this is just here to alert the reader.
func SortSubsystemsList() {
sort.Strings(allSubsystems)
// fmt.Fprintln(
// os.Stderr,
// spew.Sdump(allSubsystems),
// spew.Sdump(highlighted),
// spew.Sdump(logFilter),
// )
}
// AddLoggerSubsystem adds a subsystem to the list of known subsystems and returns the
// string so it is nice and neat in the package logg.go file
func AddLoggerSubsystem(pathBase string) (subsystem string) {
// var split []string
var ok bool
var file string
_, file, _, ok = runtime.Caller(1)
if ok {
r := strings.Split(file, pathBase)
// fmt.Fprintln(os.Stderr, version.PathBase, r)
fromRoot := filepath.Base(file)
if len(r) > 1 {
fromRoot = r[1]
}
split := strings.Split(fromRoot, "/")
// fmt.Fprintln(os.Stderr, version.PathBase, "file", file, r, fromRoot, split)
subsystem = strings.Join(split[:len(split)-1], "/")
// fmt.Fprintln(os.Stderr, "adding subsystem", subsystem)
allSubsystems = append(allSubsystems, subsystem)
}
return
}
// StoreHighlightedSubsystems sets the list of subsystems to highlight
func StoreHighlightedSubsystems(highlights []string) (found bool) {
highlightMx.Lock()
highlighted = make(map[string]struct{}, len(highlights))
for i := range highlights {
highlighted[highlights[i]] = struct{}{}
}
highlightMx.Unlock()
return
}
// LoadHighlightedSubsystems returns a copy of the map of highlighted subsystems
func LoadHighlightedSubsystems() (o []string) {
highlightMx.Lock()
o = make([]string, len(logFilter))
var counter int
for i := range logFilter {
o[counter] = i
counter++
}
highlightMx.Unlock()
sort.Strings(o)
return
}
// StoreSubsystemFilter sets the list of subsystems to filter
func StoreSubsystemFilter(filter []string) {
_logFilterMx.Lock()
logFilter = make(map[string]struct{}, len(filter))
for i := range filter {
logFilter[filter[i]] = struct{}{}
}
_logFilterMx.Unlock()
}
// LoadSubsystemFilter returns a copy of the map of filtered subsystems
func LoadSubsystemFilter() (o []string) {
_logFilterMx.Lock()
o = make([]string, len(logFilter))
var counter int
for i := range logFilter {
o[counter] = i
counter++
}
_logFilterMx.Unlock()
sort.Strings(o)
return
}
// _isHighlighted returns true if the subsystem is in the list to have attention
// getters added to them
func _isHighlighted(subsystem string) (found bool) {
highlightMx.Lock()
_, found = highlighted[subsystem]
highlightMx.Unlock()
return
}
// AddHighlightedSubsystem adds a new subsystem Name to the highlighted list
func AddHighlightedSubsystem(hl string) struct{} {
highlightMx.Lock()
highlighted[hl] = struct{}{}
highlightMx.Unlock()
return struct{}{}
}
// _isSubsystemFiltered returns true if the subsystem should not pr logs
func _isSubsystemFiltered(subsystem string) (found bool) {
_logFilterMx.Lock()
_, found = logFilter[subsystem]
_logFilterMx.Unlock()
return
}
// AddFilteredSubsystem adds a new subsystem Name to the highlighted list
func AddFilteredSubsystem(hl string) struct{} {
_logFilterMx.Lock()
logFilter[hl] = struct{}{}
_logFilterMx.Unlock()
return struct{}{}
}
func getTimeText(level int32) string {
// since := time.Now().Sub(logger_started).Round(time.Millisecond).String()
// diff := 12 - len(since)
// if diff > 0 {
// since = strings.Repeat(" ", diff) + since + " "
// }
return color.Bit24(99, 99, 99, false).Sprint(time.Now().
Format(time.StampMilli))
}
func _ln(level int32, subsystem string) func(a ...interface{}) {
return func(a ...interface{}) {
if level <= currentLevel.Load() && !_isSubsystemFiltered(subsystem) {
printer := fmt.Sprintf
if _isHighlighted(subsystem) {
printer = color.Bold.Sprintf
}
fmt.Fprintf(
writer,
printer(
"%-58v%s%s%-6v %s\n",
getLoc(2, level, subsystem),
getTimeText(level),
color.Bit24(20, 20, 20, true).
Sprint(AppColorizer(" "+App)),
LevelSpecs[level].Colorizer(
color.Bit24(20, 20, 20, true).
Sprint(" "+LevelSpecs[level].Name+" "),
),
AppColorizer(joinStrings(" ", a...)),
),
)
}
}
}
func _f(level int32, subsystem string) func(format string, a ...interface{}) {
return func(format string, a ...interface{}) {
if level <= currentLevel.Load() && !_isSubsystemFiltered(subsystem) {
printer := fmt.Sprintf
if _isHighlighted(subsystem) {
printer = color.Bold.Sprintf
}
fmt.Fprintf(
writer,
printer(
"%-58v%s%s%-6v %s\n",
getLoc(2, level, subsystem),
getTimeText(level),
color.Bit24(20, 20, 20, true).
Sprint(AppColorizer(" "+App)),
LevelSpecs[level].Colorizer(
color.Bit24(20, 20, 20, true).
Sprint(" "+LevelSpecs[level].Name+" "),
),
AppColorizer(fmt.Sprintf(format, a...)),
),
)
}
}
}
func _s(level int32, subsystem string) func(a ...interface{}) {
return func(a ...interface{}) {
if level <= currentLevel.Load() && !_isSubsystemFiltered(subsystem) {
printer := fmt.Sprintf
if _isHighlighted(subsystem) {
printer = color.Bold.Sprintf
}
fmt.Fprintf(
writer,
printer(
"%-58v%s%s%s%s%s\n",
getLoc(2, level, subsystem),
getTimeText(level),
color.Bit24(20, 20, 20, true).
Sprint(AppColorizer(" "+App)),
LevelSpecs[level].Colorizer(
color.Bit24(20, 20, 20, true).
Sprint(" "+LevelSpecs[level].Name+" "),
),
AppColorizer(
" spew:",
),
fmt.Sprint(
color.Bit24(20, 20, 20, true).Sprint("\n\n"+spew.Sdump(a)),
"\n",
),
),
)
}
}
}
func _c(level int32, subsystem string) func(closure func() string) {
return func(closure func() string) {
if level <= currentLevel.Load() && !_isSubsystemFiltered(subsystem) {
printer := fmt.Sprintf
if _isHighlighted(subsystem) {
printer = color.Bold.Sprintf
}
fmt.Fprintf(
writer,
printer(
"%-58v%s%s%-6v %s\n",
getLoc(2, level, subsystem),
getTimeText(level),
color.Bit24(20, 20, 20, true).
Sprint(AppColorizer(" "+App)),
LevelSpecs[level].Colorizer(
color.Bit24(20, 20, 20, true).
Sprint(" "+LevelSpecs[level].Name+" "),
),
AppColorizer(closure()),
),
)
}
}
}
func _chk(level int32, subsystem string) func(e error) bool | {
return func(e error) bool {
if level <= currentLevel.Load() && !_isSubsystemFiltered(subsystem) {
if e != nil {
printer := fmt.Sprintf
if _isHighlighted(subsystem) {
printer = color.Bold.Sprintf
}
fmt.Fprintf(
writer,
printer(
"%-58v%s%s%-6v %s\n",
getLoc(2, level, subsystem),
getTimeText(level),
color.Bit24(20, 20, 20, true).
Sprint(AppColorizer(" "+App)),
LevelSpecs[level].Colorizer(
color.Bit24(20, 20, 20, true).
Sprint(" "+LevelSpecs[level].Name+" "),
), | identifier_body |
|
logg.go | , false).Sprintf},
{logLevels.Error, "error", color.Bit24(255, 0, 0, false).Sprintf},
{logLevels.Check, "check", color.Bit24(255, 255, 0, false).Sprintf},
{logLevels.Warn, "warn ", color.Bit24(0, 255, 0, false).Sprintf},
{logLevels.Info, "info ", color.Bit24(255, 255, 0, false).Sprintf},
{logLevels.Debug, "debug", color.Bit24(0, 128, 255, false).Sprintf},
{logLevels.Trace, "trace", color.Bit24(128, 0, 255, false).Sprintf},
}
Levels = []string{
Off,
Fatal,
Error,
Check,
Warn,
Info,
Debug,
Trace,
}
LogChanDisabled = uberatomic.NewBool(true)
LogChan chan Entry
)
const (
Off = "off"
Fatal = "fatal"
Error = "error"
Warn = "warn"
Info = "info"
Check = "check"
Debug = "debug"
Trace = "trace"
)
// AddLogChan adds a channel that log entries are sent to
func AddLogChan() (ch chan Entry) {
LogChanDisabled.Store(false)
if LogChan != nil {
panic("warning warning")
}
// L.Writer.Write.Store( false
LogChan = make(chan Entry)
return LogChan
}
// GetLogPrinterSet returns a set of LevelPrinter with their subsystem preloaded
func GetLogPrinterSet(subsystem string) (Fatal, Error, Warn, Info, Debug, Trace LevelPrinter) {
return _getOnePrinter(_Fatal, subsystem),
_getOnePrinter(_Error, subsystem),
_getOnePrinter(_Warn, subsystem),
_getOnePrinter(_Info, subsystem),
_getOnePrinter(_Debug, subsystem),
_getOnePrinter(_Trace, subsystem)
}
func _getOnePrinter(level int32, subsystem string) LevelPrinter {
return LevelPrinter{
Ln: _ln(level, subsystem),
F: _f(level, subsystem),
S: _s(level, subsystem),
C: _c(level, subsystem),
Chk: _chk(level, subsystem),
}
}
// SetLogLevel sets the log level via a string, which can be truncated down to
// one character, similar to nmcli's argument processor, as the first letter is
// unique. This could be used with a linter to make larger command sets.
func SetLogLevel(l string) {
if l == "" {
l = "info"
}
// fmt.Fprintln(os.Stderr, "setting log level", l)
lvl := logLevels.Info
for i := range LevelSpecs {
if LevelSpecs[i].Name[:1] == l[:1] {
lvl = LevelSpecs[i].ID
}
}
currentLevel.Store(lvl)
}
// SetLogWriter atomically changes the log io.Writer interface
func SetLogWriter(wr io.Writer) {
// w := unsafe.Pointer(writer)
// c := unsafe.Pointer(wr)
// atomic.SwapPointer(&w, c)
writer = wr
}
func SetLogWriteToFile(path, appName string) (e error) {
// copy existing log file to dated log file as we will truncate it per
// session
path = filepath.Join(path, "log"+appName)
if _, e = os.Stat(path); e == nil |
var fileWriter *os.File
if fileWriter, e = os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC,
0600); e != nil {
fmt.Fprintln(os.Stderr, "unable to write log to", path, "error:", e)
return
}
mw := io.MultiWriter(os.Stderr, fileWriter)
fileWriter.Write([]byte("logging to file '" + path + "'\n"))
mw.Write([]byte("logging to file '" + path + "'\n"))
SetLogWriter(mw)
return
}
// SortSubsystemsList sorts the list of subsystems, to keep the data read-only,
// call this function right at the top of the main, which runs after
// declarations and main/init. Really this is just here to alert the reader.
func SortSubsystemsList() {
sort.Strings(allSubsystems)
// fmt.Fprintln(
// os.Stderr,
// spew.Sdump(allSubsystems),
// spew.Sdump(highlighted),
// spew.Sdump(logFilter),
// )
}
// AddLoggerSubsystem adds a subsystem to the list of known subsystems and returns the
// string so it is nice and neat in the package logg.go file
func AddLoggerSubsystem(pathBase string) (subsystem string) {
// var split []string
var ok bool
var file string
_, file, _, ok = runtime.Caller(1)
if ok {
r := strings.Split(file, pathBase)
// fmt.Fprintln(os.Stderr, version.PathBase, r)
fromRoot := filepath.Base(file)
if len(r) > 1 {
fromRoot = r[1]
}
split := strings.Split(fromRoot, "/")
// fmt.Fprintln(os.Stderr, version.PathBase, "file", file, r, fromRoot, split)
subsystem = strings.Join(split[:len(split)-1], "/")
// fmt.Fprintln(os.Stderr, "adding subsystem", subsystem)
allSubsystems = append(allSubsystems, subsystem)
}
return
}
// StoreHighlightedSubsystems sets the list of subsystems to highlight
func StoreHighlightedSubsystems(highlights []string) (found bool) {
highlightMx.Lock()
highlighted = make(map[string]struct{}, len(highlights))
for i := range highlights {
highlighted[highlights[i]] = struct{}{}
}
highlightMx.Unlock()
return
}
// LoadHighlightedSubsystems returns a copy of the map of highlighted subsystems
func LoadHighlightedSubsystems() (o []string) {
highlightMx.Lock()
o = make([]string, len(logFilter))
var counter int
for i := range logFilter {
o[counter] = i
counter++
}
highlightMx.Unlock()
sort.Strings(o)
return
}
// StoreSubsystemFilter sets the list of subsystems to filter
func StoreSubsystemFilter(filter []string) {
_logFilterMx.Lock()
logFilter = make(map[string]struct{}, len(filter))
for i := range filter {
logFilter[filter[i]] = struct{}{}
}
_logFilterMx.Unlock()
}
// LoadSubsystemFilter returns a copy of the map of filtered subsystems
func LoadSubsystemFilter() (o []string) {
_logFilterMx.Lock()
o = make([]string, len(logFilter))
var counter int
for i := range logFilter {
o[counter] = i
counter++
}
_logFilterMx.Unlock()
sort.Strings(o)
return
}
// _isHighlighted returns true if the subsystem is in the list to have attention
// getters added to them
func _isHighlighted(subsystem string) (found bool) {
highlightMx.Lock()
_, found = highlighted[subsystem]
highlightMx.Unlock()
return
}
// AddHighlightedSubsystem adds a new subsystem Name to the highlighted list
func AddHighlightedSubsystem(hl string) struct{} {
highlightMx.Lock()
highlighted[hl] = struct{}{}
highlightMx.Unlock()
return struct{}{}
}
// _isSubsystemFiltered returns true if the subsystem should not pr logs
func _isSubsystemFiltered(subsystem string) (found bool) {
_logFilterMx.Lock()
_, found = logFilter[subsystem]
_logFilterMx.Unlock()
return
}
// AddFilteredSubsystem adds a new subsystem Name to the highlighted list
func AddFilteredSubsystem(hl string) struct{} {
_logFilterMx.Lock()
logFilter[hl] = struct{}{}
_logFilterMx.Unlock()
return struct{}{}
}
func getTimeText(level int32) string {
// since := time.Now().Sub(logger_started).Round(time.Millisecond).String()
// diff := 12 - len(since)
// if diff > 0 {
// since = strings.Repeat(" ", diff) + since + " "
// }
return color.Bit24(99, 99, 99, false).Sprint(time.Now().
Format(time.StampMilli))
}
func _ln(level int32, subsystem string) func(a ...interface{}) {
return func(a ...interface{}) {
if level <= currentLevel.Load() && !_isSubsystemFiltered(subsystem) {
printer := fmt.Sprintf
if _isHighlighted(subsystem) {
printer = color.Bold.Sprintf
}
fmt.Fprintf(
writer,
printer(
"%-58v%s%s%-6v %s\n",
getLoc(2, level, subsystem),
| {
var b []byte
b, e = ioutil.ReadFile(path)
if e == nil {
ioutil.WriteFile(path+fmt.Sprint(time.Now().Unix()), b, 0600)
}
} | conditional_block |
cogroup.rs | ) in vals {
//! if wgt > max_wgt {
//! max_wgt = wgt;
//! max_val = val;
//! }
//! }
//! output.push((max_val.clone(), max_wgt));
//! })
//! ```
use std::rc::Rc;
use std::default::Default;
use std::hash::Hasher;
use std::ops::DerefMut;
use itertools::Itertools;
use ::{Collection, Data};
use timely::dataflow::*;
use timely::dataflow::operators::{Map, Binary};
use timely::dataflow::channels::pact::Exchange;
use timely_sort::{LSBRadixSorter, Unsigned};
use collection::{LeastUpperBound, Lookup, Trace, Offset};
use collection::trace::{CollectionIterator, DifferenceIterator, Traceable};
use iterators::coalesce::Coalesce;
use collection::compact::Compact;
/// Extension trait for the `group_by` and `group_by_u` differential dataflow methods.
pub trait CoGroupBy<G: Scope, K: Data, V1: Data> where G::Timestamp: LeastUpperBound {
/// A primitive binary version of `group_by`, which acts on a `Collection<G, (K, V1)>` and a `Collection<G, (K, V2)>`.
///
/// The two streams must already be key-value pairs, which is too bad. Also, in addition to the
/// normal arguments (another stream, a hash for the key, a reduction function, and per-key logic),
/// the user must specify a function implementing `Fn(u64) -> Look`, where `Look: Lookup<K, Offset>` is something you shouldn't have to know about yet.
/// The right thing to use here, for the moment, is `|_| HashMap::new()`.
///
/// There are better options if you know your key is an unsigned integer, namely `|x| (Vec::new(), x)`.
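///
/// # Example
///
/// A minimal sketch (not compiled as a doctest): `names` and `ages` are
/// hypothetical collections keyed by `u64`, and the closure bodies are
/// illustrative rather than taken from this crate. The per-key logic emits
/// every pairing of the two inputs' values, multiplying their weights.
///
/// ```ignore
/// use std::collections::HashMap;
///
/// // names: Collection<G, (u64, String)>, ages: Collection<G, (u64, u32)>
/// let paired = names.cogroup_by_inner(
///     &ages,
///     |k| *k,                        // keys are already unsigned integers
///     |k, pair| (*k, pair.clone()),  // reduce each produced value back to data
///     |_| HashMap::new(),            // the generic lookup suggested above
///     |_key, vals1, vals2, output| {
///         // materialize the second input so it can be rescanned per v1
///         let rights: Vec<_> = vals2.map(|(v, w)| (v.clone(), w)).collect();
///         for (v1, w1) in vals1 {
///             for &(ref v2, w2) in &rights {
///                 output.push(((v1.clone(), v2.clone()), w1 * w2));
///             }
///         }
///     });
/// ```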
fn cogroup_by_inner<
D: Data,
V2: Data+Default,
V3: Data+Default,
U: Unsigned+Default,
KH: Fn(&K)->U+'static,
Look: Lookup<K, Offset>+'static,
LookG: Fn(u64)->Look,
Logic: Fn(&K, &mut CollectionIterator<DifferenceIterator<V1>>, &mut CollectionIterator<DifferenceIterator<V2>>, &mut Vec<(V3, i32)>)+'static,
Reduc: Fn(&K, &V3)->D+'static,
>
(&self, other: &Collection<G, (K, V2)>, key_h: KH, reduc: Reduc, look: LookG, logic: Logic) -> Collection<G, D>;
}
impl<G: Scope, K: Data, V1: Data> CoGroupBy<G, K, V1> for Collection<G, (K, V1)>
where G::Timestamp: LeastUpperBound {
fn cogroup_by_inner<
D: Data,
V2: Data+Default,
V3: Data+Default,
U: Unsigned+Default,
KH: Fn(&K)->U+'static,
Look: Lookup<K, Offset>+'static,
LookG: Fn(u64)->Look,
Logic: Fn(&K, &mut CollectionIterator<DifferenceIterator<V1>>, &mut CollectionIterator<DifferenceIterator<V2>>, &mut Vec<(V3, i32)>)+'static,
Reduc: Fn(&K, &V3)->D+'static,
>
(&self, other: &Collection<G, (K, V2)>, key_h: KH, reduc: Reduc, look: LookG, logic: Logic) -> Collection<G, D> {
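// traces accumulating differences for each input, and for the produced output.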
let mut source1 = Trace::new(look(0));
let mut source2 = Trace::new(look(0));
let mut result = Trace::new(look(0));
// A map from times to received (key, val, wgt) triples.
let mut inputs1 = Vec::new();
let mut inputs2 = Vec::new();
// A map from times to a list of keys that need processing at that time.
let mut to_do = Vec::new();
// temporary storage for operator implementations to populate
let mut buffer = vec![];
let key_h = Rc::new(key_h);
let key_1 = key_h.clone();
let key_2 = key_h.clone();
// create an exchange channel based on the supplied Fn(&D1)->u64.
let exch1 = Exchange::new(move |&((ref k, _),_)| key_1(k).as_u64());
let exch2 = Exchange::new(move |&((ref k, _),_)| key_2(k).as_u64());
let mut sorter1 = LSBRadixSorter::new();
let mut sorter2 = LSBRadixSorter::new();
        // fabricate a data-parallel operator using the `binary_notify` pattern.
Collection::new(self.inner.binary_notify(&other.inner, exch1, exch2, "CoGroupBy", vec![], move |input1, input2, output, notificator| {
// 1. read each input, and stash it in our staging area
while let Some((time, data)) = input1.next() {
inputs1.entry_or_insert(time.time(), || Vec::new())
.push(::std::mem::replace(data.deref_mut(), Vec::new()));
notificator.notify_at(time);
}
// 1. read each input, and stash it in our staging area
while let Some((time, data)) = input2.next() {
inputs2.entry_or_insert(time.time(), || Vec::new())
.push(::std::mem::replace(data.deref_mut(), Vec::new()));
notificator.notify_at(time);
}
// 2. go through each time of interest that has reached completion
// times are interesting either because we received data, or because we conclude
// in the processing of a time that a future time will be interesting.
while let Some((index, _count)) = notificator.next() {
let mut stash = Vec::new();
panic!("interesting times needs to do LUB of union of times for each key, input");
// 2a. fetch any data associated with this time.
if let Some(mut queue) = inputs1.remove_key(&index) {
// sort things; radix if many, .sort_by if few.
let compact = if queue.len() > 1 {
for element in queue.into_iter() {
sorter1.extend(element.into_iter(), &|x| key_h(&(x.0).0));
}
let mut sorted = sorter1.finish(&|x| key_h(&(x.0).0));
let result = Compact::from_radix(&mut sorted, &|k| key_h(k));
sorted.truncate(256);
sorter1.recycle(sorted);
result
}
else {
let mut vec = queue.pop().unwrap();
let mut vec = vec.drain(..).collect::<Vec<_>>();
vec.sort_by(|x,y| key_h(&(x.0).0).cmp(&key_h((&(y.0).0))));
Compact::from_radix(&mut vec![vec], &|k| key_h(k))
};
if let Some(compact) = compact {
for key in &compact.keys {
stash.push(index.clone());
source1.interesting_times(key, &index, &mut stash);
for time in &stash {
let mut queue = to_do.entry_or_insert((*time).clone(), || { notificator.notify_at(index.delayed(time)); Vec::new() });
queue.push((*key).clone());
}
stash.clear();
}
source1.set_difference(index.time(), compact);
}
}
// 2a. fetch any data associated with this time.
if let Some(mut queue) = inputs2.remove_key(&index) {
// sort things; radix if many, .sort_by if few.
let compact = if queue.len() > 1 {
for element in queue.into_iter() {
sorter2.extend(element.into_iter(), &|x| key_h(&(x.0).0));
}
let mut sorted = sorter2.finish(&|x| key_h(&(x.0).0));
let result = Compact::from_radix(&mut sorted, &|k| key_h(k));
sorted.truncate(256);
sorter2.recycle(sorted);
result
}
else {
let mut vec = queue.pop().unwrap();
let mut vec = vec.drain(..).collect::<Vec<_>>();
vec.sort_by(|x,y| key_h(&(x.0).0).cmp(&key_h((&(y.0).0))));
Compact::from_radix(&mut vec![vec], &|k| key_h(k))
};
if let Some(compact) = compact | {
for key in &compact.keys {
stash.push(index.clone());
source2.interesting_times(key, &index, &mut stash);
for time in &stash {
let mut queue = to_do.entry_or_insert((*time).clone(), || { notificator.notify_at(index.delayed(time)); Vec::new() });
queue.push((*key).clone());
}
stash.clear();
}
source2.set_difference(index.time(), compact);
} | conditional_block |
|
cogroup.rs | for each key.
//!
//! ```ignore
//! stream.group(|key, vals, output| {
//! let (mut max_val, mut max_wgt) = vals.peek().unwrap();
//! for (val, wgt) in vals {
//! if wgt > max_wgt {
//! max_wgt = wgt;
//! max_val = val;
//! }
//! }
//! output.push((max_val.clone(), max_wgt));
//! })
//! ```
use std::rc::Rc;
use std::default::Default;
use std::hash::Hasher;
use std::ops::DerefMut;
use itertools::Itertools;
use ::{Collection, Data};
use timely::dataflow::*;
use timely::dataflow::operators::{Map, Binary};
use timely::dataflow::channels::pact::Exchange;
use timely_sort::{LSBRadixSorter, Unsigned};
use collection::{LeastUpperBound, Lookup, Trace, Offset};
use collection::trace::{CollectionIterator, DifferenceIterator, Traceable};
use iterators::coalesce::Coalesce;
use collection::compact::Compact;
/// Extension trait for the `group_by` and `group_by_u` differential dataflow methods.
pub trait CoGroupBy<G: Scope, K: Data, V1: Data> where G::Timestamp: LeastUpperBound {
/// A primitive binary version of `group_by`, which acts on a `Collection<G, (K, V1)>` and a `Collection<G, (K, V2)>`.
///
/// The two streams must already be key-value pairs, which is too bad. Also, in addition to the
/// normal arguments (another stream, a hash for the key, a reduction function, and per-key logic),
/// the user must specify a function implementing `Fn(u64) -> Look`, where `Look: Lookup<K, Offset>` is something you shouldn't have to know about yet.
/// The right thing to use here, for the moment, is `|_| HashMap::new()`.
///
/// There are better options if you know your key is an unsigned integer, namely `|x| (Vec::new(), x)`.
fn cogroup_by_inner<
D: Data,
V2: Data+Default,
V3: Data+Default,
U: Unsigned+Default,
KH: Fn(&K)->U+'static,
Look: Lookup<K, Offset>+'static,
LookG: Fn(u64)->Look,
Logic: Fn(&K, &mut CollectionIterator<DifferenceIterator<V1>>, &mut CollectionIterator<DifferenceIterator<V2>>, &mut Vec<(V3, i32)>)+'static,
Reduc: Fn(&K, &V3)->D+'static,
>
(&self, other: &Collection<G, (K, V2)>, key_h: KH, reduc: Reduc, look: LookG, logic: Logic) -> Collection<G, D>;
}
impl<G: Scope, K: Data, V1: Data> CoGroupBy<G, K, V1> for Collection<G, (K, V1)>
where G::Timestamp: LeastUpperBound {
fn cogroup_by_inner<
D: Data,
V2: Data+Default,
V3: Data+Default,
U: Unsigned+Default,
KH: Fn(&K)->U+'static,
Look: Lookup<K, Offset>+'static,
LookG: Fn(u64)->Look,
        Logic: Fn(&K, &mut CollectionIterator<DifferenceIterator<V1>>, &mut CollectionIterator<DifferenceIterator<V2>>, &mut Vec<(V3, i32)>)+'static,
Reduc: Fn(&K, &V3)->D+'static,
>
(&self, other: &Collection<G, (K, V2)>, key_h: KH, reduc: Reduc, look: LookG, logic: Logic) -> Collection<G, D> {
let mut source1 = Trace::new(look(0));
let mut source2 = Trace::new(look(0));
let mut result = Trace::new(look(0));
// A map from times to received (key, val, wgt) triples.
let mut inputs1 = Vec::new();
let mut inputs2 = Vec::new();
// A map from times to a list of keys that need processing at that time.
let mut to_do = Vec::new();
// temporary storage for operator implementations to populate
let mut buffer = vec![];
let key_h = Rc::new(key_h);
let key_1 = key_h.clone();
let key_2 = key_h.clone();
// create an exchange channel based on the supplied Fn(&D1)->u64.
let exch1 = Exchange::new(move |&((ref k, _),_)| key_1(k).as_u64());
let exch2 = Exchange::new(move |&((ref k, _),_)| key_2(k).as_u64());
let mut sorter1 = LSBRadixSorter::new();
let mut sorter2 = LSBRadixSorter::new();
        // fabricate a data-parallel operator using the `binary_notify` pattern.
Collection::new(self.inner.binary_notify(&other.inner, exch1, exch2, "CoGroupBy", vec![], move |input1, input2, output, notificator| {
// 1. read each input, and stash it in our staging area
while let Some((time, data)) = input1.next() {
inputs1.entry_or_insert(time.time(), || Vec::new())
.push(::std::mem::replace(data.deref_mut(), Vec::new()));
notificator.notify_at(time);
}
// 1. read each input, and stash it in our staging area
while let Some((time, data)) = input2.next() {
inputs2.entry_or_insert(time.time(), || Vec::new())
.push(::std::mem::replace(data.deref_mut(), Vec::new()));
notificator.notify_at(time);
}
// 2. go through each time of interest that has reached completion
// times are interesting either because we received data, or because we conclude
// in the processing of a time that a future time will be interesting.
while let Some((index, _count)) = notificator.next() {
let mut stash = Vec::new();
panic!("interesting times needs to do LUB of union of times for each key, input");
// 2a. fetch any data associated with this time.
if let Some(mut queue) = inputs1.remove_key(&index) {
// sort things; radix if many, .sort_by if few.
let compact = if queue.len() > 1 {
for element in queue.into_iter() {
sorter1.extend(element.into_iter(), &|x| key_h(&(x.0).0));
}
let mut sorted = sorter1.finish(&|x| key_h(&(x.0).0));
let result = Compact::from_radix(&mut sorted, &|k| key_h(k));
sorted.truncate(256);
sorter1.recycle(sorted);
result
}
else {
let mut vec = queue.pop().unwrap();
let mut vec = vec.drain(..).collect::<Vec<_>>();
vec.sort_by(|x,y| key_h(&(x.0).0).cmp(&key_h((&(y.0).0))));
Compact::from_radix(&mut vec![vec], &|k| key_h(k))
};
if let Some(compact) = compact {
for key in &compact.keys {
stash.push(index.clone());
source1.interesting_times(key, &index, &mut stash);
for time in &stash {
let mut queue = to_do.entry_or_insert((*time).clone(), || { notificator.notify_at(index.delayed(time)); Vec::new() });
queue.push((*key).clone());
}
stash.clear();
}
source1.set_difference(index.time(), compact);
}
}
// 2a. fetch any data associated with this time.
if let Some(mut queue) = inputs2.remove_key(&index) {
// sort things; radix if many, .sort_by if few.
let compact = if queue.len() > 1 {
for element in queue.into_iter() {
sorter2.extend(element.into_iter(), &|x| key_h(&(x.0).0));
}
let mut sorted = sorter2.finish(&|x| key_h(&(x.0).0));
let result = Compact::from_radix(&mut sorted, &|k| key_h(k));
sorted.truncate(256);
sorter2.recycle(sorted);
result
}
else {
let mut vec = queue.pop().unwrap();
let mut vec = vec.drain(..).collect::<Vec<_>>(); | if let Some(compact) = compact {
for key in &compact.keys {
stash.push(index.clone());
source2.interesting_times(key, &index, &mut stash);
for time in &stash {
let mut queue = to_do.entry_or_insert((*time). | vec.sort_by(|x,y| key_h(&(x.0).0).cmp(&key_h((&(y.0).0))));
Compact::from_radix(&mut vec![vec], &|k| key_h(k))
};
| random_line_split |
cogroup.rs | for each key.
//!
//! ```ignore
//! stream.group(|key, vals, output| {
//! let (mut max_val, mut max_wgt) = vals.peek().unwrap();
//! for (val, wgt) in vals {
//! if wgt > max_wgt {
//! max_wgt = wgt;
//! max_val = val;
//! }
//! }
//! output.push((max_val.clone(), max_wgt));
//! })
//! ```
use std::rc::Rc;
use std::default::Default;
use std::hash::Hasher;
use std::ops::DerefMut;
use itertools::Itertools;
use ::{Collection, Data};
use timely::dataflow::*;
use timely::dataflow::operators::{Map, Binary};
use timely::dataflow::channels::pact::Exchange;
use timely_sort::{LSBRadixSorter, Unsigned};
use collection::{LeastUpperBound, Lookup, Trace, Offset};
use collection::trace::{CollectionIterator, DifferenceIterator, Traceable};
use iterators::coalesce::Coalesce;
use collection::compact::Compact;
/// Extension trait for the `group_by` and `group_by_u` differential dataflow methods.
pub trait CoGroupBy<G: Scope, K: Data, V1: Data> where G::Timestamp: LeastUpperBound {
/// A primitive binary version of `group_by`, which acts on a `Collection<G, (K, V1)>` and a `Collection<G, (K, V2)>`.
///
/// The two streams must already be key-value pairs, which is too bad. Also, in addition to the
/// normal arguments (another stream, a hash for the key, a reduction function, and per-key logic),
/// the user must specify a function implementing `Fn(u64) -> Look`, where `Look: Lookup<K, Offset>` is something you shouldn't have to know about yet.
/// The right thing to use here, for the moment, is `|_| HashMap::new()`.
///
/// There are better options if you know your key is an unsigned integer, namely `|x| (Vec::new(), x)`.
fn cogroup_by_inner<
D: Data,
V2: Data+Default,
V3: Data+Default,
U: Unsigned+Default,
KH: Fn(&K)->U+'static,
Look: Lookup<K, Offset>+'static,
LookG: Fn(u64)->Look,
Logic: Fn(&K, &mut CollectionIterator<DifferenceIterator<V1>>, &mut CollectionIterator<DifferenceIterator<V2>>, &mut Vec<(V3, i32)>)+'static,
Reduc: Fn(&K, &V3)->D+'static,
>
(&self, other: &Collection<G, (K, V2)>, key_h: KH, reduc: Reduc, look: LookG, logic: Logic) -> Collection<G, D>;
}
impl<G: Scope, K: Data, V1: Data> CoGroupBy<G, K, V1> for Collection<G, (K, V1)>
where G::Timestamp: LeastUpperBound {
fn | <
D: Data,
V2: Data+Default,
V3: Data+Default,
U: Unsigned+Default,
KH: Fn(&K)->U+'static,
Look: Lookup<K, Offset>+'static,
LookG: Fn(u64)->Look,
        Logic: Fn(&K, &mut CollectionIterator<DifferenceIterator<V1>>, &mut CollectionIterator<DifferenceIterator<V2>>, &mut Vec<(V3, i32)>)+'static,
Reduc: Fn(&K, &V3)->D+'static,
>
(&self, other: &Collection<G, (K, V2)>, key_h: KH, reduc: Reduc, look: LookG, logic: Logic) -> Collection<G, D> {
let mut source1 = Trace::new(look(0));
let mut source2 = Trace::new(look(0));
let mut result = Trace::new(look(0));
// A map from times to received (key, val, wgt) triples.
let mut inputs1 = Vec::new();
let mut inputs2 = Vec::new();
// A map from times to a list of keys that need processing at that time.
let mut to_do = Vec::new();
// temporary storage for operator implementations to populate
let mut buffer = vec![];
let key_h = Rc::new(key_h);
let key_1 = key_h.clone();
let key_2 = key_h.clone();
// create an exchange channel based on the supplied Fn(&D1)->u64.
let exch1 = Exchange::new(move |&((ref k, _),_)| key_1(k).as_u64());
let exch2 = Exchange::new(move |&((ref k, _),_)| key_2(k).as_u64());
let mut sorter1 = LSBRadixSorter::new();
let mut sorter2 = LSBRadixSorter::new();
        // fabricate a data-parallel operator using the `binary_notify` pattern.
Collection::new(self.inner.binary_notify(&other.inner, exch1, exch2, "CoGroupBy", vec![], move |input1, input2, output, notificator| {
// 1. read each input, and stash it in our staging area
while let Some((time, data)) = input1.next() {
inputs1.entry_or_insert(time.time(), || Vec::new())
.push(::std::mem::replace(data.deref_mut(), Vec::new()));
notificator.notify_at(time);
}
// 1. read each input, and stash it in our staging area
while let Some((time, data)) = input2.next() {
inputs2.entry_or_insert(time.time(), || Vec::new())
.push(::std::mem::replace(data.deref_mut(), Vec::new()));
notificator.notify_at(time);
}
// 2. go through each time of interest that has reached completion
// times are interesting either because we received data, or because we conclude
// in the processing of a time that a future time will be interesting.
while let Some((index, _count)) = notificator.next() {
let mut stash = Vec::new();
panic!("interesting times needs to do LUB of union of times for each key, input");
// 2a. fetch any data associated with this time.
if let Some(mut queue) = inputs1.remove_key(&index) {
// sort things; radix if many, .sort_by if few.
let compact = if queue.len() > 1 {
for element in queue.into_iter() {
sorter1.extend(element.into_iter(), &|x| key_h(&(x.0).0));
}
let mut sorted = sorter1.finish(&|x| key_h(&(x.0).0));
let result = Compact::from_radix(&mut sorted, &|k| key_h(k));
sorted.truncate(256);
sorter1.recycle(sorted);
result
}
else {
let mut vec = queue.pop().unwrap();
let mut vec = vec.drain(..).collect::<Vec<_>>();
vec.sort_by(|x,y| key_h(&(x.0).0).cmp(&key_h((&(y.0).0))));
Compact::from_radix(&mut vec![vec], &|k| key_h(k))
};
if let Some(compact) = compact {
for key in &compact.keys {
stash.push(index.clone());
source1.interesting_times(key, &index, &mut stash);
for time in &stash {
let mut queue = to_do.entry_or_insert((*time).clone(), || { notificator.notify_at(index.delayed(time)); Vec::new() });
queue.push((*key).clone());
}
stash.clear();
}
source1.set_difference(index.time(), compact);
}
}
// 2a. fetch any data associated with this time.
if let Some(mut queue) = inputs2.remove_key(&index) {
// sort things; radix if many, .sort_by if few.
let compact = if queue.len() > 1 {
for element in queue.into_iter() {
sorter2.extend(element.into_iter(), &|x| key_h(&(x.0).0));
}
let mut sorted = sorter2.finish(&|x| key_h(&(x.0).0));
let result = Compact::from_radix(&mut sorted, &|k| key_h(k));
sorted.truncate(256);
sorter2.recycle(sorted);
result
}
else {
let mut vec = queue.pop().unwrap();
let mut vec = vec.drain(..).collect::<Vec<_>>();
vec.sort_by(|x,y| key_h(&(x.0).0).cmp(&key_h((&(y.0).0))));
Compact::from_radix(&mut vec![vec], &|k| key_h(k))
};
if let Some(compact) = compact {
for key in &compact.keys {
stash.push(index.clone());
source2.interesting_times(key, &index, &mut stash);
for time in &stash {
let mut queue = to_do.entry_or_insert((*time | cogroup_by_inner | identifier_name |
cogroup.rs | for each key.
//!
//! ```ignore
//! stream.group(|key, vals, output| {
//! let (mut max_val, mut max_wgt) = vals.peek().unwrap();
//! for (val, wgt) in vals {
//! if wgt > max_wgt {
//! max_wgt = wgt;
//! max_val = val;
//! }
//! }
//! output.push((max_val.clone(), max_wgt));
//! })
//! ```
use std::rc::Rc;
use std::default::Default;
use std::hash::Hasher;
use std::ops::DerefMut;
use itertools::Itertools;
use ::{Collection, Data};
use timely::dataflow::*;
use timely::dataflow::operators::{Map, Binary};
use timely::dataflow::channels::pact::Exchange;
use timely_sort::{LSBRadixSorter, Unsigned};
use collection::{LeastUpperBound, Lookup, Trace, Offset};
use collection::trace::{CollectionIterator, DifferenceIterator, Traceable};
use iterators::coalesce::Coalesce;
use collection::compact::Compact;
/// Extension trait for the `group_by` and `group_by_u` differential dataflow methods.
pub trait CoGroupBy<G: Scope, K: Data, V1: Data> where G::Timestamp: LeastUpperBound {
/// A primitive binary version of `group_by`, which acts on a `Collection<G, (K, V1)>` and a `Collection<G, (K, V2)>`.
///
/// The two streams must already be key-value pairs, which is too bad. Also, in addition to the
/// normal arguments (another stream, a hash for the key, a reduction function, and per-key logic),
/// the user must specify a function implementing `Fn(u64) -> Look`, where `Look: Lookup<K, Offset>` is something you shouldn't have to know about yet.
/// The right thing to use here, for the moment, is `|_| HashMap::new()`.
///
/// There are better options if you know your key is an unsigned integer, namely `|x| (Vec::new(), x)`.
fn cogroup_by_inner<
D: Data,
V2: Data+Default,
V3: Data+Default,
U: Unsigned+Default,
KH: Fn(&K)->U+'static,
Look: Lookup<K, Offset>+'static,
LookG: Fn(u64)->Look,
Logic: Fn(&K, &mut CollectionIterator<DifferenceIterator<V1>>, &mut CollectionIterator<DifferenceIterator<V2>>, &mut Vec<(V3, i32)>)+'static,
Reduc: Fn(&K, &V3)->D+'static,
>
(&self, other: &Collection<G, (K, V2)>, key_h: KH, reduc: Reduc, look: LookG, logic: Logic) -> Collection<G, D>;
}
impl<G: Scope, K: Data, V1: Data> CoGroupBy<G, K, V1> for Collection<G, (K, V1)>
where G::Timestamp: LeastUpperBound {
fn cogroup_by_inner<
D: Data,
V2: Data+Default,
V3: Data+Default,
U: Unsigned+Default,
KH: Fn(&K)->U+'static,
Look: Lookup<K, Offset>+'static,
LookG: Fn(u64)->Look,
        Logic: Fn(&K, &mut CollectionIterator<DifferenceIterator<V1>>, &mut CollectionIterator<DifferenceIterator<V2>>, &mut Vec<(V3, i32)>)+'static,
Reduc: Fn(&K, &V3)->D+'static,
>
(&self, other: &Collection<G, (K, V2)>, key_h: KH, reduc: Reduc, look: LookG, logic: Logic) -> Collection<G, D> | // create an exchange channel based on the supplied Fn(&D1)->u64.
let exch1 = Exchange::new(move |&((ref k, _),_)| key_1(k).as_u64());
let exch2 = Exchange::new(move |&((ref k, _),_)| key_2(k).as_u64());
let mut sorter1 = LSBRadixSorter::new();
let mut sorter2 = LSBRadixSorter::new();
        // fabricate a data-parallel operator using the `binary_notify` pattern.
Collection::new(self.inner.binary_notify(&other.inner, exch1, exch2, "CoGroupBy", vec![], move |input1, input2, output, notificator| {
// 1. read each input, and stash it in our staging area
while let Some((time, data)) = input1.next() {
inputs1.entry_or_insert(time.time(), || Vec::new())
.push(::std::mem::replace(data.deref_mut(), Vec::new()));
notificator.notify_at(time);
}
// 1. read each input, and stash it in our staging area
while let Some((time, data)) = input2.next() {
inputs2.entry_or_insert(time.time(), || Vec::new())
.push(::std::mem::replace(data.deref_mut(), Vec::new()));
notificator.notify_at(time);
}
// 2. go through each time of interest that has reached completion
// times are interesting either because we received data, or because we conclude
// in the processing of a time that a future time will be interesting.
while let Some((index, _count)) = notificator.next() {
let mut stash = Vec::new();
panic!("interesting times needs to do LUB of union of times for each key, input");
// 2a. fetch any data associated with this time.
if let Some(mut queue) = inputs1.remove_key(&index) {
// sort things; radix if many, .sort_by if few.
let compact = if queue.len() > 1 {
for element in queue.into_iter() {
sorter1.extend(element.into_iter(), &|x| key_h(&(x.0).0));
}
let mut sorted = sorter1.finish(&|x| key_h(&(x.0).0));
let result = Compact::from_radix(&mut sorted, &|k| key_h(k));
sorted.truncate(256);
sorter1.recycle(sorted);
result
}
else {
let mut vec = queue.pop().unwrap();
let mut vec = vec.drain(..).collect::<Vec<_>>();
vec.sort_by(|x,y| key_h(&(x.0).0).cmp(&key_h((&(y.0).0))));
Compact::from_radix(&mut vec![vec], &|k| key_h(k))
};
if let Some(compact) = compact {
for key in &compact.keys {
stash.push(index.clone());
source1.interesting_times(key, &index, &mut stash);
for time in &stash {
let mut queue = to_do.entry_or_insert((*time).clone(), || { notificator.notify_at(index.delayed(time)); Vec::new() });
queue.push((*key).clone());
}
stash.clear();
}
source1.set_difference(index.time(), compact);
}
}
// 2a. fetch any data associated with this time.
if let Some(mut queue) = inputs2.remove_key(&index) {
// sort things; radix if many, .sort_by if few.
let compact = if queue.len() > 1 {
for element in queue.into_iter() {
sorter2.extend(element.into_iter(), &|x| key_h(&(x.0).0));
}
let mut sorted = sorter2.finish(&|x| key_h(&(x.0).0));
let result = Compact::from_radix(&mut sorted, &|k| key_h(k));
sorted.truncate(256);
sorter2.recycle(sorted);
result
}
else {
let mut vec = queue.pop().unwrap();
let mut vec = vec.drain(..).collect::<Vec<_>>();
vec.sort_by(|x,y| key_h(&(x.0).0).cmp(&key_h((&(y.0).0))));
Compact::from_radix(&mut vec![vec], &|k| key_h(k))
};
if let Some(compact) = compact {
for key in &compact.keys {
stash.push(index.clone());
source2.interesting_times(key, &index, &mut stash);
for time in &stash {
let mut queue = to_do.entry_or_insert((*time). | {
let mut source1 = Trace::new(look(0));
let mut source2 = Trace::new(look(0));
let mut result = Trace::new(look(0));
// A map from times to received (key, val, wgt) triples.
let mut inputs1 = Vec::new();
let mut inputs2 = Vec::new();
// A map from times to a list of keys that need processing at that time.
let mut to_do = Vec::new();
// temporary storage for operator implementations to populate
let mut buffer = vec![];
let key_h = Rc::new(key_h);
let key_1 = key_h.clone();
let key_2 = key_h.clone();
| identifier_body |
create-cp-input.py | _SSH_KEY = 'ssh_key'
CONST_KSQL_DEST_DIR = '/var/lib/kafka/ksql'
CONST_SOURCE_PATH = 'source_path'
CONST_DEST_PATH = 'destination_path'
CONST_BOOTSTRAP_SERVERS = 'bootstrap_servers'
CONST_API_KEY = 'api_key'
CONST_API_SECRET = 'api_secret'
CONST_ADMIN = 'admin'
CONST_CONSUMER = 'consumer'
CONST_PRODUCER = 'producer'
CONST_SR = 'schema_registry'
CONST_KSQL = 'ksql'
CONST_URL = 'url'
CONST_CREDENTIALS = 'credentials'
CONST_ENV = 'env'
CONST_CONTEXT = 'context'
CONST_COMPANY = 'company'
CONST_PROJECTS = 'projects'
CONST_SOURCE = 'source'
inputs_map = {
    CONST_TIMESTAMP: '',
    CONST_CONTEXT: 'test_context',
    CONST_COMPANY: 'test_company',
    CONST_ENV: 'test_env',
    CONST_SOURCE: 'test_source',
    CONST_PROJECTS: [],
    CONST_BOOTSTRAP_SERVERS: '',
    CONST_CONNECT: [],
    CONST_CONSUMER: [],
    CONST_PRODUCER: [],
    CONST_CLUSTERDATA: {CONST_SSH_USER: 'TODO', CONST_SSH_KEY: 'TODO'},
    CONST_KSQL + '_' + CONST_QUERIES: [],
    CONST_KSQL + '_' + CONST_HOSTS: [],
    CONST_CONNECT + '_' + CONST_HOSTS: [],
    CONST_CONNECT + '_' + CONST_CONNECTORS: [],
    CONST_CONNECT + '_' + CONST_PLUGINS: [],
}
def create_template(temp_file):
with open(temp_file) as f:
temp = f.read()
f.close()
return Template(temp)
def render_template (input_map, input_template, output_file):
with open(output_file, "w+") as f:
print (input_template.render(input_map), file=f)
f.close()
# Identify topic name, replication factor, and partitions by topic
def process_topic_item (feid, project, topic_item, override_part, override_repl):
name = topic_item[CONST_NAME]
fqname = name
if CONST_PARTITIONS in topic_item:
use_part = topic_item[CONST_PARTITIONS]
elif override_part != 0:
use_part = override_part
else:
use_part = 1
if CONST_REPLICATION in topic_item:
use_repl = topic_item[CONST_REPLICATION]
elif override_repl != 0:
use_repl = override_repl
else:
use_repl = 1
topic = {}
topic [CONST_NAME] = fqname
topic [CONST_REPLICATION] = use_repl
topic [CONST_PARTITIONS] = use_part
return topic
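# A usage sketch (values invented for illustration): the topic item carries no
# explicit settings, so the overrides win, and 1 is the final fallback.
#
#   process_topic_item('fe1', 'fe1', {'name': 'orders'}, 3, 2)
#   # -> {'name': 'orders', 'replication': 2, 'partitions': 3}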
# Create Julieops descriptor file
def process_broker (feid, doc):
    logging.debug ('-------')
    # Default to 0 so process_topic_item falls back to its own defaults when
    # the descriptor supplies no override (also avoids a NameError below).
    override_part = 0
    override_repl = 0
    if CONST_TOPIC in doc and CONST_OVERRIDE in doc[CONST_TOPIC]:
        override = doc[CONST_TOPIC][CONST_OVERRIDE]
        if CONST_PARTITIONS in override:
            override_part = override[CONST_PARTITIONS]
        if CONST_REPLICATION in override:
            override_repl = override[CONST_REPLICATION]
    logging.info ('partition = ' + str(override_part) + ', replication = ' + str(override_repl))
    if CONST_DEPENDENCIES not in doc[CONST_TOPIC]:
        logging.info ('No dependency topics')
    else:
        for dependency in doc[CONST_TOPIC][CONST_DEPENDENCIES]:
            process_topic_item (feid, feid, dependency, override_part, override_repl)
if CONST_TOPICS not in doc[CONST_TOPIC]:
logging.info ('No topics to provision')
return
topics = []
lists = doc[CONST_TOPIC][CONST_TOPICS]
for item in lists:
topic = process_topic_item (feid, feid, item, override_part, override_repl)
topics.append(topic)
logging.debug(topic)
projects = []
project = {}
project[CONST_NAME] = feid
project[CONST_TOPICS] = topics
projects.append(project)
inputs_map[CONST_PROJECTS] = projects
def provision_ksql_query (feid, doc):
ksql_files = []
for query_file in doc:
ksql_file = {}
ksql_file[CONST_SOURCE_PATH] = query_file
ksql_file[CONST_DEST_PATH] = CONST_KSQL_DEST_DIR + os.path.sep + os.path.basename(query_file)
ksql_files.append(ksql_file)
inputs_map[CONST_KSQL + '_' + CONST_QUERIES] = ksql_files
def provision_ksql_hosts (feid, doc):
hosts = []
for host in doc:
logging.info ('ksql host is ' + host)
hosts.append(host)
inputs_map[CONST_KSQL + '_' + CONST_HOSTS] = hosts
# Create cp-ansible yaml with ksql section
def process_ksql (feid, doc):
logging.debug ('-------')
if CONST_PROVISION in doc and doc[CONST_PROVISION] == True:
provision_ksql_hosts (feid, doc[CONST_HOSTS])
provision_ksql_query (feid, doc[CONST_QUERIES])
logging.debug ('-------')
def provision_connect_plugins (feid, doc, plugin_type):
plugins = []
for plugin in doc:
logging.info ('Connect Plugin ' + plugin_type + ' is ' + plugin)
plugins.append(plugin)
inputs_map[CONST_CONNECT + '_' + CONST_PLUGINS + "_" + plugin_type] = plugins
def provision_connect_connectors (feid, doc):
connectors = []
connectors_json = []
for connector in doc:
connectors.append(connector)
f = open(connector, 'r')
data = json.load(f)
f.close()
name = data['name']
config = data['config']
config2 = []
for item in config:
config2.append(item + " : " + str(config[item]))
data2 = {'name': name, 'config': config2}
connectors_json.append(data2)
inputs_map[CONST_CONNECT + '_' + CONST_CONNECTORS] = connectors
inputs_map[CONST_CONNECT + '_' + CONST_CONNECTORS + '_' + 'json'] = connectors_json
def provision_connect_hosts (feid, doc):
hosts = []
for host in doc:
logging.info ('Connect host is ' + host)
hosts.append(host)
inputs_map[CONST_CONNECT + '_' + CONST_HOSTS] = hosts
# Create cp-ansible yaml with connect section
def process_connect (feid, doc):
logging.debug ('-------')
if CONST_PROVISION in doc and doc[CONST_PROVISION] == True:
provision_connect_hosts (feid, doc[CONST_HOSTS])
provision_connect_plugins (feid, doc[CONST_PLUGINS][CONST_PLUGINS_HUB], CONST_PLUGINS_HUB)
provision_connect_plugins (feid, doc[CONST_PLUGINS][CONST_PLUGINS_LOCAL], CONST_PLUGINS_LOCAL)
provision_connect_plugins (feid, doc[CONST_PLUGINS][CONST_PLUGINS_REMOTE], CONST_PLUGINS_REMOTE)
provision_connect_connectors (feid, doc[CONST_CONNECTORS])
logging.debug ('-------')
def process (doc, args):
inputs_map[CONST_TIMESTAMP] = datetime.now()
fe_id = doc[CONST_NAME]
inputs_map[CONST_NAME] = fe_id
output_ansible = fe_id + ".ansible.yaml"
output_julie = fe_id + ".julieops.yaml"
output_cluster = fe_id + ".cluster.properties"
template_ansible = create_template (args.ansibletemplate)
template_julie = create_template (args.julietemplate)
template_cluster = create_template (args.brokertemplate)
logging.info("Feature name is " + fe_id)
logging.info("Ansible YAML is " + output_ansible + ", Template is " + args.ansibletemplate)
logging.info("Julieops YAML is " + output_julie + ", Template is " + args.julietemplate)
process_broker (doc[CONST_NAME], doc[CONST_BROKER])
process_ksql (doc[CONST_NAME], doc[CONST_KSQL])
process_connect (doc[CONST_NAME], doc[CONST_CONNECT])
render_template (inputs_map, template_ansible, output_ansible)
render_template (inputs_map, template_julie, output_julie)
render_template (inputs_map, template_cluster, output_cluster)
def | (docs, config_type, override_apikey, override_apisecret):
newdocs = {}
if config_type in docs and CONST_API_KEY in docs[config_type]:
newdocs[CONST_API_KEY] = docs[config_type][CONST_API_KEY]
newdocs[CONST_API_SECRET] = docs[config_type][CONST_API_SECRET]
else:
newdocs[CONST_API_KEY] = override_apikey
newdocs[CONST_API_SECRET] = override_apisecret
return newdocs
def process_ccloud_config (docs):
override_apikey = ""
override_apisecret = ""
if CONST_OVERRIDE in docs[CONST_CREDENTIALS]:
override = docs[CONST_CREDENTIALS][CONST_OVERRIDE]
if CONST_API_KEY in override:
override_apikey = override[CONST_API_KEY]
if CONST_API_SECRET in override:
override_apisecret = override[CONST_API_SECRET]
logging.debug ('REMOVE THIS api key = ' + str(override_apikey) + ', secret = ' + str(override_apisecret))
inputs_map[CONST_BOOTSTRAP_SERVERS] = docs[CONST_BOOTSTRAP_SERVERS]
inputs_map[CONST_ADMIN] = get_api_config (docs[CONST_CREDENTIALS], CONST_ADMIN, override_apikey, override_apisecret)
inputs_map | get_api_config | identifier_name |
create-cp-input.py | import os
CONST_TIMESTAMP = 'timestamp'
CONST_NAME = 'name'
CONST_PARTITIONS = 'partitions'
CONST_REPLICATION = 'replication'
CONST_OVERRIDE = 'override'
CONST_DEPENDENCIES = 'dependencies'
CONST_KSQL = 'ksql'
CONST_CONNECT = 'connect'
CONST_TOPIC = 'topic'
CONST_TOPICS = 'topics'
CONST_BROKER = 'broker'
CONST_PROVISION = 'provision'
CONST_CONNECTORS = 'connectors'
CONST_DESCRIPTION = 'description'
CONST_QUERIES = 'queries'
CONST_HOSTS = 'hosts'
CONST_PLUGINS = 'plugins'
CONST_PLUGINS_HUB = 'hub'
CONST_PLUGINS_LOCAL = 'local'
CONST_PLUGINS_REMOTE = 'remote'
CONST_CLUSTERDATA = 'cluster_data'
CONST_SSH_USER = 'ssh_username'
CONST_SSH_KEY = 'ssh_key'
CONST_KSQL_DEST_DIR = '/var/lib/kafka/ksql'
CONST_SOURCE_PATH = 'source_path'
CONST_DEST_PATH = 'destination_path'
CONST_BOOTSTRAP_SERVERS = 'bootstrap_servers'
CONST_API_KEY = 'api_key'
CONST_API_SECRET = 'api_secret'
CONST_ADMIN = 'admin'
CONST_CONSUMER = 'consumer'
CONST_PRODUCER = 'producer'
CONST_SR = 'schema_registry'
CONST_KSQL = 'ksql'
CONST_URL = 'url'
CONST_CREDENTIALS = 'credentials'
CONST_ENV = 'env'
CONST_CONTEXT = 'context'
CONST_COMPANY = 'company'
CONST_PROJECTS = 'projects'
CONST_SOURCE = 'source'
inputs_map = {
    CONST_TIMESTAMP: '',
    CONST_CONTEXT: 'test_context',
    CONST_COMPANY: 'test_company',
    CONST_ENV: 'test_env',
    CONST_SOURCE: 'test_source',
    CONST_PROJECTS: [],
    CONST_BOOTSTRAP_SERVERS: '',
    CONST_CONNECT: [],
    CONST_CONSUMER: [],
    CONST_PRODUCER: [],
    CONST_CLUSTERDATA: {CONST_SSH_USER: 'TODO', CONST_SSH_KEY: 'TODO'},
    CONST_KSQL + '_' + CONST_QUERIES: [],
    CONST_KSQL + '_' + CONST_HOSTS: [],
    CONST_CONNECT + '_' + CONST_HOSTS: [],
    CONST_CONNECT + '_' + CONST_CONNECTORS: [],
    CONST_CONNECT + '_' + CONST_PLUGINS: [],
}
def create_template(temp_file):
with open(temp_file) as f:
temp = f.read()
f.close()
return Template(temp)
def render_template (input_map, input_template, output_file):
with open(output_file, "w+") as f:
print (input_template.render(input_map), file=f)
f.close()
# Identify topic name, replication factor, and partitions by topic
def process_topic_item (feid, project, topic_item, override_part, override_repl):
name = topic_item[CONST_NAME]
fqname = name
if CONST_PARTITIONS in topic_item:
use_part = topic_item[CONST_PARTITIONS]
elif override_part != 0:
use_part = override_part
else:
use_part = 1
if CONST_REPLICATION in topic_item:
use_repl = topic_item[CONST_REPLICATION]
elif override_repl != 0:
use_repl = override_repl
else:
use_repl = 1
topic = {}
topic [CONST_NAME] = fqname
topic [CONST_REPLICATION] = use_repl
topic [CONST_PARTITIONS] = use_part
return topic
# Create Julieops descriptor file
def process_broker (feid, doc):
    logging.debug ('-------')
    # Default to 0 so process_topic_item falls back to its own defaults when
    # the descriptor supplies no override (also avoids a NameError below).
    override_part = 0
    override_repl = 0
    if CONST_TOPIC in doc and CONST_OVERRIDE in doc[CONST_TOPIC]:
        override = doc[CONST_TOPIC][CONST_OVERRIDE]
        if CONST_PARTITIONS in override:
            override_part = override[CONST_PARTITIONS]
        if CONST_REPLICATION in override:
            override_repl = override[CONST_REPLICATION]
    logging.info ('partition = ' + str(override_part) + ', replication = ' + str(override_repl))
    if CONST_DEPENDENCIES not in doc[CONST_TOPIC]:
        logging.info ('No dependency topics')
    else:
        for dependency in doc[CONST_TOPIC][CONST_DEPENDENCIES]:
            process_topic_item (feid, feid, dependency, override_part, override_repl)
if CONST_TOPICS not in doc[CONST_TOPIC]:
logging.info ('No topics to provision')
return
topics = []
lists = doc[CONST_TOPIC][CONST_TOPICS]
for item in lists:
topic = process_topic_item (feid, feid, item, override_part, override_repl)
topics.append(topic)
logging.debug(topic)
projects = []
project = {}
project[CONST_NAME] = feid
project[CONST_TOPICS] = topics
projects.append(project)
inputs_map[CONST_PROJECTS] = projects
def provision_ksql_query (feid, doc):
ksql_files = []
for query_file in doc:
ksql_file = {}
ksql_file[CONST_SOURCE_PATH] = query_file
ksql_file[CONST_DEST_PATH] = CONST_KSQL_DEST_DIR + os.path.sep + os.path.basename(query_file)
ksql_files.append(ksql_file)
inputs_map[CONST_KSQL + '_' + CONST_QUERIES] = ksql_files
def provision_ksql_hosts (feid, doc):
hosts = []
for host in doc:
logging.info ('ksql host is ' + host)
hosts.append(host)
inputs_map[CONST_KSQL + '_' + CONST_HOSTS] = hosts
# Create cp-ansible yaml with ksql section
def process_ksql (feid, doc):
logging.debug ('-------')
if CONST_PROVISION in doc and doc[CONST_PROVISION] == True:
provision_ksql_hosts (feid, doc[CONST_HOSTS])
provision_ksql_query (feid, doc[CONST_QUERIES])
logging.debug ('-------')
def provision_connect_plugins (feid, doc, plugin_type):
plugins = []
for plugin in doc:
logging.info ('Connect Plugin ' + plugin_type + ' is ' + plugin)
plugins.append(plugin)
inputs_map[CONST_CONNECT + '_' + CONST_PLUGINS + "_" + plugin_type] = plugins
def provision_connect_connectors (feid, doc):
connectors = []
connectors_json = []
for connector in doc:
connectors.append(connector)
f = open(connector, 'r')
data = json.load(f)
f.close()
name = data['name']
config = data['config']
config2 = []
for item in config:
config2.append(item + " : " + str(config[item]))
data2 = {'name': name, 'config': config2}
connectors_json.append(data2)
inputs_map[CONST_CONNECT + '_' + CONST_CONNECTORS] = connectors
inputs_map[CONST_CONNECT + '_' + CONST_CONNECTORS + '_' + 'json'] = connectors_json
def provision_connect_hosts (feid, doc):
hosts = []
for host in doc:
logging.info ('Connect host is ' + host)
hosts.append(host)
inputs_map[CONST_CONNECT + '_' + CONST_HOSTS] = hosts
# Create cp-ansible yaml with connect section
def process_connect (feid, doc):
logging.debug ('-------')
if CONST_PROVISION in doc and doc[CONST_PROVISION] == True:
provision_connect_hosts (feid, doc[CONST_HOSTS])
provision_connect_plugins (feid, doc[CONST_PLUGINS][CONST_PLUGINS_HUB], CONST_PLUGINS_HUB)
provision_connect_plugins (feid, doc[CONST_PLUGINS][CONST_PLUGINS_LOCAL], CONST_PLUGINS_LOCAL)
provision_connect_plugins (feid, doc[CONST_PLUGINS][CONST_PLUGINS_REMOTE], CONST_PLUGINS_REMOTE)
provision_connect_connectors (feid, doc[CONST_CONNECTORS])
logging.debug ('-------')
def process (doc, args):
inputs_map[CONST_TIMESTAMP] = datetime.now()
fe_id = doc[CONST_NAME]
inputs_map[CONST_NAME] = fe_id
output_ansible = fe_id + ".ansible.yaml"
output_julie = fe_id + ".julieops.yaml"
output_cluster = fe_id + ".cluster.properties"
template_ansible = create_template (args.ansibletemplate)
template_julie = create_template (args.julietemplate)
template_cluster = create_template (args.brokertemplate)
logging.info("Feature name is " + fe_id)
logging.info("Ansible YAML is " + output_ansible + ", Template is " + args.ansibletemplate)
logging.info("Julieops YAML is " + output_julie + ", Template is " + args.julietemplate)
process_broker (doc[CONST_NAME], doc[CONST_BROKER])
process_ksql (doc[CONST_NAME], doc[CONST_KSQL])
process_connect (doc[CONST_NAME], doc[CONST_CONNECT])
render_template (inputs_map, template_ansible, output_ansible)
render_template (inputs_map, template_julie, output_julie)
render_template (inputs_map, template_cluster, output_cluster)
def get_api_config(docs, config_type, override_apikey, override_apisecret):
newdocs = {}
if config_type in docs and CONST_API_KEY in docs[config_type]:
newdocs[CONST_API_KEY] = docs[config_type][CONST_API_KEY]
newdocs[CONST_API_SECRET] = docs[config_type][CONST_API_SECRET]
else:
newdocs[CONST_API_KEY] = override_apikey
newdocs[CONST_API_SECRET | from jinja2 import Template
import yaml
import json
import logging
import requests | random_line_split |
|
create-cp-input.py | _SSH_KEY = 'ssh_key'
CONST_KSQL_DEST_DIR = '/var/lib/kafka/ksql'
CONST_SOURCE_PATH = 'source_path'
CONST_DEST_PATH = 'destination_path'
CONST_BOOTSTRAP_SERVERS = 'bootstrap_servers'
CONST_API_KEY = 'api_key'
CONST_API_SECRET = 'api_secret'
CONST_ADMIN = 'admin'
CONST_CONSUMER = 'consumer'
CONST_PRODUCER = 'producer'
CONST_SR = 'schema_registry'
CONST_KSQL = 'ksql'
CONST_URL = 'url'
CONST_CREDENTIALS = 'credentials'
CONST_ENV = 'env'
CONST_CONTEXT = 'context'
CONST_COMPANY = 'company'
CONST_PROJECTS = 'projects'
CONST_SOURCE = 'source'
inputs_map = {
    CONST_TIMESTAMP: '',
    CONST_CONTEXT: 'test_context',
    CONST_COMPANY: 'test_company',
    CONST_ENV: 'test_env',
    CONST_SOURCE: 'test_source',
    CONST_PROJECTS: [],
    CONST_BOOTSTRAP_SERVERS: '',
    CONST_CONNECT: [],
    CONST_CONSUMER: [],
    CONST_PRODUCER: [],
    CONST_CLUSTERDATA: {CONST_SSH_USER: 'TODO', CONST_SSH_KEY: 'TODO'},
    CONST_KSQL + '_' + CONST_QUERIES: [],
    CONST_KSQL + '_' + CONST_HOSTS: [],
    CONST_CONNECT + '_' + CONST_HOSTS: [],
    CONST_CONNECT + '_' + CONST_CONNECTORS: [],
    CONST_CONNECT + '_' + CONST_PLUGINS: [],
}
def create_template(temp_file):
with open(temp_file) as f:
temp = f.read()
f.close()
return Template(temp)
def render_template (input_map, input_template, output_file):
with open(output_file, "w+") as f:
print (input_template.render(input_map), file=f)
f.close()
# Identify topic name, replication factor, and partitions by topic
def process_topic_item (feid, project, topic_item, override_part, override_repl):
name = topic_item[CONST_NAME]
fqname = name
if CONST_PARTITIONS in topic_item:
use_part = topic_item[CONST_PARTITIONS]
elif override_part != 0:
use_part = override_part
else:
use_part = 1
if CONST_REPLICATION in topic_item:
use_repl = topic_item[CONST_REPLICATION]
elif override_repl != 0:
use_repl = override_repl
else:
use_repl = 1
topic = {}
topic [CONST_NAME] = fqname
topic [CONST_REPLICATION] = use_repl
topic [CONST_PARTITIONS] = use_part
return topic
# Create Julieops descriptor file
def process_broker (feid, doc):
    logging.debug ('-------')
    # Default to 0 so process_topic_item falls back to its own defaults when
    # the descriptor supplies no override (also avoids a NameError below).
    override_part = 0
    override_repl = 0
    if CONST_TOPIC in doc and CONST_OVERRIDE in doc[CONST_TOPIC]:
        override = doc[CONST_TOPIC][CONST_OVERRIDE]
        if CONST_PARTITIONS in override:
            override_part = override[CONST_PARTITIONS]
        if CONST_REPLICATION in override:
            override_repl = override[CONST_REPLICATION]
    logging.info ('partition = ' + str(override_part) + ', replication = ' + str(override_repl))
    if CONST_DEPENDENCIES not in doc[CONST_TOPIC]:
        logging.info ('No dependency topics')
    else:
        for dependency in doc[CONST_TOPIC][CONST_DEPENDENCIES]:
            process_topic_item (feid, feid, dependency, override_part, override_repl)
if CONST_TOPICS not in doc[CONST_TOPIC]:
logging.info ('No topics to provision')
return
topics = []
lists = doc[CONST_TOPIC][CONST_TOPICS]
for item in lists:
topic = process_topic_item (feid, feid, item, override_part, override_repl)
topics.append(topic)
logging.debug(topic)
projects = []
project = {}
project[CONST_NAME] = feid
project[CONST_TOPICS] = topics
projects.append(project)
inputs_map[CONST_PROJECTS] = projects
def provision_ksql_query (feid, doc):
ksql_files = []
for query_file in doc:
ksql_file = {}
ksql_file[CONST_SOURCE_PATH] = query_file
ksql_file[CONST_DEST_PATH] = CONST_KSQL_DEST_DIR + os.path.sep + os.path.basename(query_file)
ksql_files.append(ksql_file)
inputs_map[CONST_KSQL + '_' + CONST_QUERIES] = ksql_files
def provision_ksql_hosts (feid, doc):
|
# Create cp-ansible yaml with ksql section
def process_ksql (feid, doc):
logging.debug ('-------')
if CONST_PROVISION in doc and doc[CONST_PROVISION] == True:
provision_ksql_hosts (feid, doc[CONST_HOSTS])
provision_ksql_query (feid, doc[CONST_QUERIES])
logging.debug ('-------')
def provision_connect_plugins (feid, doc, plugin_type):
plugins = []
for plugin in doc:
logging.info ('Connect Plugin ' + plugin_type + ' is ' + plugin)
plugins.append(plugin)
inputs_map[CONST_CONNECT + '_' + CONST_PLUGINS + "_" + plugin_type] = plugins
def provision_connect_connectors (feid, doc):
connectors = []
connectors_json = []
for connector in doc:
connectors.append(connector)
f = open(connector, 'r')
data = json.load(f)
f.close()
name = data['name']
config = data['config']
config2 = []
for item in config:
config2.append(item + " : " + str(config[item]))
data2 = {'name': name, 'config': config2}
connectors_json.append(data2)
inputs_map[CONST_CONNECT + '_' + CONST_CONNECTORS] = connectors
inputs_map[CONST_CONNECT + '_' + CONST_CONNECTORS + '_' + 'json'] = connectors_json
def provision_connect_hosts (feid, doc):
hosts = []
for host in doc:
logging.info ('Connect host is ' + host)
hosts.append(host)
inputs_map[CONST_CONNECT + '_' + CONST_HOSTS] = hosts
# Create cp-ansible yaml with connect section
def process_connect (feid, doc):
logging.debug ('-------')
if CONST_PROVISION in doc and doc[CONST_PROVISION] == True:
provision_connect_hosts (feid, doc[CONST_HOSTS])
provision_connect_plugins (feid, doc[CONST_PLUGINS][CONST_PLUGINS_HUB], CONST_PLUGINS_HUB)
provision_connect_plugins (feid, doc[CONST_PLUGINS][CONST_PLUGINS_LOCAL], CONST_PLUGINS_LOCAL)
provision_connect_plugins (feid, doc[CONST_PLUGINS][CONST_PLUGINS_REMOTE], CONST_PLUGINS_REMOTE)
provision_connect_connectors (feid, doc[CONST_CONNECTORS])
logging.debug ('-------')
def process (doc, args):
inputs_map[CONST_TIMESTAMP] = datetime.now()
fe_id = doc[CONST_NAME]
inputs_map[CONST_NAME] = fe_id
output_ansible = fe_id + ".ansible.yaml"
output_julie = fe_id + ".julieops.yaml"
output_cluster = fe_id + ".cluster.properties"
template_ansible = create_template (args.ansibletemplate)
template_julie = create_template (args.julietemplate)
template_cluster = create_template (args.brokertemplate)
logging.info("Feature name is " + fe_id)
logging.info("Ansible YAML is " + output_ansible + ", Template is " + args.ansibletemplate)
logging.info("Julieops YAML is " + output_julie + ", Template is " + args.julietemplate)
process_broker (doc[CONST_NAME], doc[CONST_BROKER])
process_ksql (doc[CONST_NAME], doc[CONST_KSQL])
process_connect (doc[CONST_NAME], doc[CONST_CONNECT])
render_template (inputs_map, template_ansible, output_ansible)
render_template (inputs_map, template_julie, output_julie)
render_template (inputs_map, template_cluster, output_cluster)
def get_api_config(docs, config_type, override_apikey, override_apisecret):
newdocs = {}
if config_type in docs and CONST_API_KEY in docs[config_type]:
newdocs[CONST_API_KEY] = docs[config_type][CONST_API_KEY]
newdocs[CONST_API_SECRET] = docs[config_type][CONST_API_SECRET]
else:
newdocs[CONST_API_KEY] = override_apikey
newdocs[CONST_API_SECRET] = override_apisecret
return newdocs
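# A usage sketch (credentials invented for illustration): the 'admin' section
# carries its own key pair, so the override arguments are ignored.
#
#   get_api_config({'admin': {'api_key': 'AK', 'api_secret': 'AS'}},
#                  'admin', 'OVERRIDE_KEY', 'OVERRIDE_SECRET')
#   # -> {'api_key': 'AK', 'api_secret': 'AS'}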
def process_ccloud_config (docs):
override_apikey = ""
override_apisecret = ""
if CONST_OVERRIDE in docs[CONST_CREDENTIALS]:
override = docs[CONST_CREDENTIALS][CONST_OVERRIDE]
if CONST_API_KEY in override:
override_apikey = override[CONST_API_KEY]
if CONST_API_SECRET in override:
override_apisecret = override[CONST_API_SECRET]
logging.debug ('REMOVE THIS api key = ' + str(override_apikey) + ', secret = ' + str(override_apisecret))
inputs_map[CONST_BOOTSTRAP_SERVERS] = docs[CONST_BOOTSTRAP_SERVERS]
inputs_map[CONST_ADMIN] = get_api_config (docs[CONST_CREDENTIALS], CONST_ADMIN, override_apikey, override_apisecret)
inputs_map[ | hosts = []
for host in doc:
logging.info ('ksql host is ' + host)
hosts.append(host)
inputs_map[CONST_KSQL + '_' + CONST_HOSTS] = hosts | identifier_body |
create-cp-input.py | _SSH_KEY = 'ssh_key'
CONST_KSQL_DEST_DIR = '/var/lib/kafka/ksql'
CONST_SOURCE_PATH = 'source_path'
CONST_DEST_PATH = 'destination_path'
CONST_BOOTSTRAP_SERVERS = 'bootstrap_servers'
CONST_API_KEY = 'api_key'
CONST_API_SECRET = 'api_secret'
CONST_ADMIN = 'admin'
CONST_CONSUMER = 'consumer'
CONST_PRODUCER = 'producer'
CONST_SR = 'schema_registry'
CONST_KSQL = 'ksql'
CONST_URL = 'url'
CONST_CREDENTIALS = 'credentials'
CONST_ENV = 'env'
CONST_CONTEXT = 'context'
CONST_COMPANY = 'company'
CONST_PROJECTS = 'projects'
CONST_SOURCE = 'source'
inputs_map = {
    CONST_TIMESTAMP: '',
    CONST_CONTEXT: 'test_context',
    CONST_COMPANY: 'test_company',
    CONST_ENV: 'test_env',
    CONST_SOURCE: 'test_source',
    CONST_PROJECTS: [],
    CONST_BOOTSTRAP_SERVERS: '',
    CONST_CONNECT: [],
    CONST_CONSUMER: [],
    CONST_PRODUCER: [],
    CONST_CLUSTERDATA: {CONST_SSH_USER: 'TODO', CONST_SSH_KEY: 'TODO'},
    CONST_KSQL + '_' + CONST_QUERIES: [],
    CONST_KSQL + '_' + CONST_HOSTS: [],
    CONST_CONNECT + '_' + CONST_HOSTS: [],
    CONST_CONNECT + '_' + CONST_CONNECTORS: [],
    CONST_CONNECT + '_' + CONST_PLUGINS: [],
}
def create_template(temp_file):
with open(temp_file) as f:
temp = f.read()
f.close()
return Template(temp)
def render_template (input_map, input_template, output_file):
with open(output_file, "w+") as f:
print (input_template.render(input_map), file=f)
f.close()
# Identify topic name, replication factor, and partitions by topic
def process_topic_item (feid, project, topic_item, override_part, override_repl):
name = topic_item[CONST_NAME]
fqname = name
if CONST_PARTITIONS in topic_item:
use_part = topic_item[CONST_PARTITIONS]
elif override_part != 0:
use_part = override_part
else:
use_part = 1
if CONST_REPLICATION in topic_item:
use_repl = topic_item[CONST_REPLICATION]
elif override_repl != 0:
use_repl = override_repl
else:
use_repl = 1
topic = {}
topic [CONST_NAME] = fqname
topic [CONST_REPLICATION] = use_repl
topic [CONST_PARTITIONS] = use_part
return topic
# Create Julieops descriptor file
def process_broker (feid, doc):
    logging.debug ('-------')
    # Default to 0 so process_topic_item falls back to its own defaults when
    # the descriptor supplies no override (also avoids a NameError below).
    override_part = 0
    override_repl = 0
    if CONST_TOPIC in doc and CONST_OVERRIDE in doc[CONST_TOPIC]:
        override = doc[CONST_TOPIC][CONST_OVERRIDE]
        if CONST_PARTITIONS in override:
            override_part = override[CONST_PARTITIONS]
        if CONST_REPLICATION in override:
            override_repl = override[CONST_REPLICATION]
    logging.info ('partition = ' + str(override_part) + ', replication = ' + str(override_repl))
    if CONST_DEPENDENCIES not in doc[CONST_TOPIC]:
        logging.info ('No dependency topics')
    else:
        for dependency in doc[CONST_TOPIC][CONST_DEPENDENCIES]:
            process_topic_item (feid, feid, dependency, override_part, override_repl)
if CONST_TOPICS not in doc[CONST_TOPIC]:
logging.info ('No topics to provision')
return
topics = []
lists = doc[CONST_TOPIC][CONST_TOPICS]
for item in lists:
topic = process_topic_item (feid, feid, item, override_part, override_repl)
topics.append(topic)
logging.debug(topic)
projects = []
project = {}
project[CONST_NAME] = feid
project[CONST_TOPICS] = topics
projects.append(project)
inputs_map[CONST_PROJECTS] = projects
def provision_ksql_query (feid, doc):
ksql_files = []
for query_file in doc:
ksql_file = {}
ksql_file[CONST_SOURCE_PATH] = query_file
ksql_file[CONST_DEST_PATH] = CONST_KSQL_DEST_DIR + os.path.sep + os.path.basename(query_file)
ksql_files.append(ksql_file)
inputs_map[CONST_KSQL + '_' + CONST_QUERIES] = ksql_files
def provision_ksql_hosts (feid, doc):
hosts = []
for host in doc:
logging.info ('ksql host is ' + host)
hosts.append(host)
inputs_map[CONST_KSQL + '_' + CONST_HOSTS] = hosts
# Create cp-ansible yaml with ksql section
def process_ksql (feid, doc):
logging.debug ('-------')
if CONST_PROVISION in doc and doc[CONST_PROVISION] == True:
provision_ksql_hosts (feid, doc[CONST_HOSTS])
provision_ksql_query (feid, doc[CONST_QUERIES])
logging.debug ('-------')
def provision_connect_plugins (feid, doc, plugin_type):
plugins = []
for plugin in doc:
logging.info ('Connect Plugin ' + plugin_type + ' is ' + plugin)
plugins.append(plugin)
inputs_map[CONST_CONNECT + '_' + CONST_PLUGINS + "_" + plugin_type] = plugins
def provision_connect_connectors (feid, doc):
connectors = []
connectors_json = []
for connector in doc:
connectors.append(connector)
f = open(connector, 'r')
data = json.load(f)
f.close()
name = data['name']
config = data['config']
config2 = []
for item in config:
config2.append(item + " : " + str(config[item]))
data2 = {'name': name, 'config': config2}
connectors_json.append(data2)
inputs_map[CONST_CONNECT + '_' + CONST_CONNECTORS] = connectors
inputs_map[CONST_CONNECT + '_' + CONST_CONNECTORS + '_' + 'json'] = connectors_json
def provision_connect_hosts (feid, doc):
hosts = []
for host in doc:
logging.info ('Connect host is ' + host)
hosts.append(host)
inputs_map[CONST_CONNECT + '_' + CONST_HOSTS] = hosts
# Create cp-ansible yaml with connect section
def process_connect (feid, doc):
logging.debug ('-------')
if CONST_PROVISION in doc and doc[CONST_PROVISION] == True:
provision_connect_hosts (feid, doc[CONST_HOSTS])
provision_connect_plugins (feid, doc[CONST_PLUGINS][CONST_PLUGINS_HUB], CONST_PLUGINS_HUB)
provision_connect_plugins (feid, doc[CONST_PLUGINS][CONST_PLUGINS_LOCAL], CONST_PLUGINS_LOCAL)
provision_connect_plugins (feid, doc[CONST_PLUGINS][CONST_PLUGINS_REMOTE], CONST_PLUGINS_REMOTE)
provision_connect_connectors (feid, doc[CONST_CONNECTORS])
logging.debug ('-------')
def process (doc, args):
inputs_map[CONST_TIMESTAMP] = datetime.now()
fe_id = doc[CONST_NAME]
inputs_map[CONST_NAME] = fe_id
output_ansible = fe_id + ".ansible.yaml"
output_julie = fe_id + ".julieops.yaml"
output_cluster = fe_id + ".cluster.properties"
template_ansible = create_template (args.ansibletemplate)
template_julie = create_template (args.julietemplate)
template_cluster = create_template (args.brokertemplate)
logging.info("Feature name is " + fe_id)
logging.info("Ansible YAML is " + output_ansible + ", Template is " + args.ansibletemplate)
logging.info("Julieops YAML is " + output_julie + ", Template is " + args.julietemplate)
process_broker (doc[CONST_NAME], doc[CONST_BROKER])
process_ksql (doc[CONST_NAME], doc[CONST_KSQL])
process_connect (doc[CONST_NAME], doc[CONST_CONNECT])
render_template (inputs_map, template_ansible, output_ansible)
render_template (inputs_map, template_julie, output_julie)
render_template (inputs_map, template_cluster, output_cluster)
def get_api_config(docs, config_type, override_apikey, override_apisecret):
newdocs = {}
if config_type in docs and CONST_API_KEY in docs[config_type]:
newdocs[CONST_API_KEY] = docs[config_type][CONST_API_KEY]
newdocs[CONST_API_SECRET] = docs[config_type][CONST_API_SECRET]
else:
|
return newdocs
def process_ccloud_config (docs):
override_apikey = ""
override_apisecret = ""
if CONST_OVERRIDE in docs[CONST_CREDENTIALS]:
override = docs[CONST_CREDENTIALS][CONST_OVERRIDE]
if CONST_API_KEY in override:
override_apikey = override[CONST_API_KEY]
if CONST_API_SECRET in override:
override_apisecret = override[CONST_API_SECRET]
logging.debug ('REMOVE THIS api key = ' + str(override_apikey) + ', secret = ' + str(override_apisecret))
inputs_map[CONST_BOOTSTRAP_SERVERS] = docs[CONST_BOOTSTRAP_SERVERS]
inputs_map[CONST_ADMIN] = get_api_config (docs[CONST_CREDENTIALS], CONST_ADMIN, override_apikey, override_apisecret)
inputs_map[ | newdocs[CONST_API_KEY] = override_apikey
newdocs[CONST_API_SECRET] = override_apisecret | conditional_block |
mod.rs | This typedef is generally used to avoid writing out io::Error directly and
/// is otherwise a direct mapping to Result.
pub type Result<T> = result::Result<T, io::Error>;
// Returns a `Vec<T>` with a size in bytes at least as large as `size_in_bytes`.
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
fn vec_with_size_in_bytes<T: Default>(size_in_bytes: usize) -> Vec<T> {
let rounded_size = (size_in_bytes + size_of::<T>() - 1) / size_of::<T>();
let mut v = Vec::with_capacity(rounded_size);
for _ in 0..rounded_size {
v.push(T::default())
}
v
}
// The kvm API has many structs that resemble the following `Foo` structure:
//
// ```
// #[repr(C)]
// struct Foo {
// some_data: u32
// entries: __IncompleteArrayField<__u32>,
// }
// ```
//
// In order to allocate such a structure, `size_of::<Foo>()` would be too small because it would not
// include any space for `entries`. To make the allocation large enough while still being aligned
// for `Foo`, a `Vec<Foo>` is created. Only the first element of `Vec<Foo>` would actually be used
// as a `Foo`. The remaining memory in the `Vec<Foo>` is for `entries`, which must be contiguous
// with `Foo`. This function is used to make the `Vec<Foo>` with enough space for `count` entries.
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
fn vec_with_array_field<T: Default, F>(count: usize) -> Vec<T> {
let element_space = count * size_of::<F>();
let vec_size_bytes = size_of::<T>() + element_space;
vec_with_size_in_bytes(vec_size_bytes)
}
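// As a usage sketch (the entry count of 4 is invented for illustration),
// allocating a `kvm_cpuid2` with room for 4 trailing `kvm_cpuid_entry2`
// entries mirrors the real call in `CpuId::new` below:
//
//     let mut kvm_cpuid = vec_with_array_field::<kvm_cpuid2, kvm_cpuid_entry2>(4);
//     kvm_cpuid[0].nent = 4;
//
// Only `kvm_cpuid[0]` is used as the header struct; the rest of the Vec's
// allocation provides contiguous storage for the flexible `entries` array.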
/// Wrapper over the `kvm_cpuid2` structure.
///
/// The structure has a zero length array at the end, hidden behind bounds check.
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
pub struct CpuId {
// Wrapper over `kvm_cpuid2` from which we only use the first element.
kvm_cpuid: Vec<kvm_cpuid2>,
// Number of `kvm_cpuid_entry2` structs at the end of kvm_cpuid2.
allocated_len: usize,
}
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
impl Clone for CpuId {
fn clone(&self) -> Self {
let mut kvm_cpuid = Vec::with_capacity(self.kvm_cpuid.len());
for _ in 0..self.kvm_cpuid.len() {
kvm_cpuid.push(kvm_cpuid2::default());
}
let num_bytes = self.kvm_cpuid.len() * size_of::<kvm_cpuid2>();
let src_byte_slice =
unsafe { std::slice::from_raw_parts(self.kvm_cpuid.as_ptr() as *const u8, num_bytes) };
let dst_byte_slice =
unsafe { std::slice::from_raw_parts_mut(kvm_cpuid.as_mut_ptr() as *mut u8, num_bytes) };
dst_byte_slice.copy_from_slice(src_byte_slice);
CpuId {
kvm_cpuid,
allocated_len: self.allocated_len,
}
}
}
#[cfg(test)]
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
impl PartialEq for CpuId {
fn eq(&self, other: &CpuId) -> bool {
let entries: &[kvm_cpuid_entry2] =
unsafe { self.kvm_cpuid[0].entries.as_slice(self.allocated_len) };
let other_entries: &[kvm_cpuid_entry2] =
            unsafe { other.kvm_cpuid[0].entries.as_slice(other.allocated_len) };
self.allocated_len == other.allocated_len && entries == other_entries
}
}
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
impl CpuId {
/// Creates a new `CpuId` structure that contains at most `array_len` KVM CPUID entries.
///
/// # Arguments
///
/// * `array_len` - Maximum number of CPUID entries.
///
/// # Example
///
/// ```
/// use kvm_ioctls::CpuId;
/// let cpu_id = CpuId::new(32);
/// ```
pub fn new(array_len: usize) -> CpuId {
let mut kvm_cpuid = vec_with_array_field::<kvm_cpuid2, kvm_cpuid_entry2>(array_len);
kvm_cpuid[0].nent = array_len as u32;
CpuId {
kvm_cpuid,
allocated_len: array_len,
}
}
/// Creates a new `CpuId` structure based on a supplied vector of `kvm_cpuid_entry2`.
///
/// # Arguments
///
/// * `entries` - The vector of `kvm_cpuid_entry2` entries.
///
/// # Example
///
/// ```rust
/// # extern crate kvm_ioctls;
/// extern crate kvm_bindings;
///
/// use kvm_bindings::kvm_cpuid_entry2;
/// use kvm_ioctls::CpuId;
/// // Create a Cpuid to hold one entry.
/// let mut cpuid = CpuId::new(1);
/// let mut entries = cpuid.mut_entries_slice().to_vec();
/// let new_entry = kvm_cpuid_entry2 {
/// function: 0x4,
/// index: 0,
/// flags: 1,
/// eax: 0b1100000,
/// ebx: 0,
/// ecx: 0,
/// edx: 0,
/// padding: [0, 0, 0],
/// };
/// entries.insert(0, new_entry);
/// cpuid = CpuId::from_entries(&entries);
/// ```
///
pub fn from_entries(entries: &[kvm_cpuid_entry2]) -> CpuId {
let mut kvm_cpuid = vec_with_array_field::<kvm_cpuid2, kvm_cpuid_entry2>(entries.len());
kvm_cpuid[0].nent = entries.len() as u32;
unsafe {
kvm_cpuid[0]
.entries
.as_mut_slice(entries.len())
.copy_from_slice(entries);
}
CpuId {
kvm_cpuid,
allocated_len: entries.len(),
}
}
/// Returns the mutable entries slice so they can be modified before passing to the VCPU.
///
/// # Example
/// ```rust
/// use kvm_ioctls::{CpuId, Kvm, MAX_KVM_CPUID_ENTRIES};
/// let kvm = Kvm::new().unwrap();
/// let mut cpuid = kvm.get_supported_cpuid(MAX_KVM_CPUID_ENTRIES).unwrap();
/// let cpuid_entries = cpuid.mut_entries_slice();
/// ```
///
pub fn mut_entries_slice(&mut self) -> &mut [kvm_cpuid_entry2] {
// Mapping the unsized array to a slice is unsafe because the length isn't known. Using
// the length we originally allocated with eliminates the possibility of overflow.
if self.kvm_cpuid[0].nent as usize > self.allocated_len {
self.kvm_cpuid[0].nent = self.allocated_len as u32;
}
let nent = self.kvm_cpuid[0].nent as usize;
unsafe { self.kvm_cpuid[0].entries.as_mut_slice(nent) }
}
/// Get a pointer so it can be passed to the kernel. Using this pointer is unsafe.
///
pub fn | (&self) -> *const kvm_cpuid2 {
&self.kvm_cpuid[0]
}
/// Get a mutable pointer so it can be passed to the kernel. Using this pointer is unsafe.
///
pub fn as_mut_ptr(&mut self) -> *mut kvm_cpuid2 {
&mut self.kvm_cpuid[0]
}
}
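// Typical flow, as a sketch (the ioctl call named below is hypothetical and
// stands in for whatever kernel interface consumes a `*mut kvm_cpuid2`):
//
// let mut cpuid = CpuId::new(4);
// for entry in cpuid.mut_entries_slice() {
//     entry.flags = 0; // adjust entries in place before handing them off
// }
// // some_cpuid_ioctl(cpuid.as_mut_ptr()); -- the kernel then reads/writes `nent`
// // and at most `allocated_len` entries behind the pointer.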
/// Safe wrapper over the `kvm_run` struct.
///
/// The wrapper is needed for sending the pointer to `kvm_run` between
/// threads as raw pointers do not implement `Send` and `Sync`.
pub struct KvmRunWrapper {
kvm_run_ptr: *mut u8,
// This field is needed so we can `munmap` the memory mapped to hold `kvm_run`.
mmap_size: usize,
}
// Send and Sync aren't automatically implemented for the raw address pointer.
// Accessing that pointer is only done through the stateless interface which
// allows the object to be shared by multiple threads without a decrease in
// safety.
unsafe impl Send for KvmRunWrapper {}
unsafe impl Sync for KvmRunWrapper {}
impl KvmRunWrapper {
/// Maps the first `size` bytes of the given `fd`.
///
/// # Arguments
/// * `fd` - File descriptor to mmap from.
/// * `size` | as_ptr | identifier_name |
mod.rs | Vec::with_capacity(rounded_size);
for _ in 0..rounded_size {
v.push(T::default())
}
v
}
// The kvm API has many structs that resemble the following `Foo` structure:
//
// ```
// #[repr(C)]
// struct Foo {
// some_data: u32
// entries: __IncompleteArrayField<__u32>,
// }
// ```
//
// In order to allocate such a structure, `size_of::<Foo>()` would be too small because it would not
// include any space for `entries`. To make the allocation large enough while still being aligned
// for `Foo`, a `Vec<Foo>` is created. Only the first element of `Vec<Foo>` would actually be used
// as a `Foo`. The remaining memory in the `Vec<Foo>` is for `entries`, which must be contiguous
// with `Foo`. This function is used to make the `Vec<Foo>` with enough space for `count` entries.
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
fn vec_with_array_field<T: Default, F>(count: usize) -> Vec<T> {
let element_space = count * size_of::<F>();
let vec_size_bytes = size_of::<T>() + element_space;
vec_with_size_in_bytes(vec_size_bytes)
}
/// Wrapper over the `kvm_cpuid2` structure.
///
/// The structure has a zero-length array at the end, hidden behind a bounds check.
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
pub struct CpuId {
// Wrapper over `kvm_cpuid2` from which we only use the first element.
kvm_cpuid: Vec<kvm_cpuid2>,
// Number of `kvm_cpuid_entry2` structs at the end of kvm_cpuid2.
allocated_len: usize,
}
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
impl Clone for CpuId {
fn clone(&self) -> Self {
let mut kvm_cpuid = Vec::with_capacity(self.kvm_cpuid.len());
for _ in 0..self.kvm_cpuid.len() {
kvm_cpuid.push(kvm_cpuid2::default());
}
let num_bytes = self.kvm_cpuid.len() * size_of::<kvm_cpuid2>();
let src_byte_slice =
unsafe { std::slice::from_raw_parts(self.kvm_cpuid.as_ptr() as *const u8, num_bytes) };
let dst_byte_slice =
unsafe { std::slice::from_raw_parts_mut(kvm_cpuid.as_mut_ptr() as *mut u8, num_bytes) };
dst_byte_slice.copy_from_slice(src_byte_slice);
CpuId {
kvm_cpuid,
allocated_len: self.allocated_len,
}
}
}
#[cfg(test)]
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
impl PartialEq for CpuId {
fn eq(&self, other: &CpuId) -> bool {
let entries: &[kvm_cpuid_entry2] =
unsafe { self.kvm_cpuid[0].entries.as_slice(self.allocated_len) };
let other_entries: &[kvm_cpuid_entry2] =
unsafe { other.kvm_cpuid[0].entries.as_slice(other.allocated_len) };
self.allocated_len == other.allocated_len && entries == other_entries
}
}
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
impl CpuId {
/// Creates a new `CpuId` structure that contains at most `array_len` KVM CPUID entries.
///
/// # Arguments
///
/// * `array_len` - Maximum number of CPUID entries.
///
/// # Example
///
/// ```
/// use kvm_ioctls::CpuId;
/// let cpu_id = CpuId::new(32);
/// ```
pub fn new(array_len: usize) -> CpuId {
let mut kvm_cpuid = vec_with_array_field::<kvm_cpuid2, kvm_cpuid_entry2>(array_len);
kvm_cpuid[0].nent = array_len as u32;
CpuId {
kvm_cpuid,
allocated_len: array_len,
}
}
/// Creates a new `CpuId` structure based on a supplied vector of `kvm_cpuid_entry2`.
///
/// # Arguments
///
/// * `entries` - The vector of `kvm_cpuid_entry2` entries.
///
/// # Example
///
/// ```rust
/// # extern crate kvm_ioctls;
/// extern crate kvm_bindings;
///
/// use kvm_bindings::kvm_cpuid_entry2;
/// use kvm_ioctls::CpuId;
/// // Create a Cpuid to hold one entry.
/// let mut cpuid = CpuId::new(1);
/// let mut entries = cpuid.mut_entries_slice().to_vec();
/// let new_entry = kvm_cpuid_entry2 {
/// function: 0x4,
/// index: 0,
/// flags: 1,
/// eax: 0b1100000,
/// ebx: 0,
/// ecx: 0,
/// edx: 0,
/// padding: [0, 0, 0],
/// };
/// entries.insert(0, new_entry);
/// cpuid = CpuId::from_entries(&entries);
/// ```
///
pub fn from_entries(entries: &[kvm_cpuid_entry2]) -> CpuId {
let mut kvm_cpuid = vec_with_array_field::<kvm_cpuid2, kvm_cpuid_entry2>(entries.len());
kvm_cpuid[0].nent = entries.len() as u32;
unsafe {
kvm_cpuid[0]
.entries
.as_mut_slice(entries.len())
.copy_from_slice(entries);
}
CpuId {
kvm_cpuid,
allocated_len: entries.len(),
}
}
/// Returns the mutable entries slice so they can be modified before passing to the VCPU.
///
/// # Example
/// ```rust
/// use kvm_ioctls::{CpuId, Kvm, MAX_KVM_CPUID_ENTRIES};
/// let kvm = Kvm::new().unwrap();
/// let mut cpuid = kvm.get_supported_cpuid(MAX_KVM_CPUID_ENTRIES).unwrap();
/// let cpuid_entries = cpuid.mut_entries_slice();
/// ```
///
pub fn mut_entries_slice(&mut self) -> &mut [kvm_cpuid_entry2] {
// Mapping the unsized array to a slice is unsafe because the length isn't known. Using
// the length we originally allocated with eliminates the possibility of overflow.
if self.kvm_cpuid[0].nent as usize > self.allocated_len {
self.kvm_cpuid[0].nent = self.allocated_len as u32;
}
let nent = self.kvm_cpuid[0].nent as usize;
unsafe { self.kvm_cpuid[0].entries.as_mut_slice(nent) }
}
/// Get a pointer so it can be passed to the kernel. Using this pointer is unsafe.
///
pub fn as_ptr(&self) -> *const kvm_cpuid2 {
&self.kvm_cpuid[0]
}
/// Get a mutable pointer so it can be passed to the kernel. Using this pointer is unsafe.
///
pub fn as_mut_ptr(&mut self) -> *mut kvm_cpuid2 {
&mut self.kvm_cpuid[0]
}
}
/// Safe wrapper over the `kvm_run` struct.
///
/// The wrapper is needed for sending the pointer to `kvm_run` between
/// threads as raw pointers do not implement `Send` and `Sync`.
pub struct KvmRunWrapper {
kvm_run_ptr: *mut u8,
// This field is needed so we can `munmap` the memory mapped to hold `kvm_run`.
mmap_size: usize,
}
// Send and Sync aren't automatically implemented for the raw address pointer.
// Accessing that pointer is only done through the stateless interface which
// allows the object to be shared by multiple threads without a decrease in
// safety.
unsafe impl Send for KvmRunWrapper {}
unsafe impl Sync for KvmRunWrapper {}
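// `mmap_size` exists so the mapping can be released when the wrapper goes away.
// The `Drop` impl itself is elided in this excerpt; a minimal sketch, assuming
// the wrapper owns the sole reference to the mapping, would be:
//
// impl Drop for KvmRunWrapper {
//     fn drop(&mut self) {
//         // Safe because the mapping was created in `mmap_from_fd` and nothing
//         // uses the pointer after the wrapper is dropped.
//         unsafe {
//             libc::munmap(self.kvm_run_ptr as *mut libc::c_void, self.mmap_size);
//         }
//     }
// }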
impl KvmRunWrapper {
/// Maps the first `size` bytes of the given `fd`.
///
/// # Arguments
/// * `fd` - File descriptor to mmap from.
/// * `size` - Size of memory region in bytes.
pub fn mmap_from_fd(fd: &AsRawFd, size: usize) -> Result<KvmRunWrapper> {
// This is safe because we are creating a mapping in a place not already used by any other
// area in this process.
let addr = unsafe {
libc::mmap(
null_mut(),
size,
libc::PROT_READ | libc::PROT_WRITE,
libc::MAP_SHARED,
fd.as_raw_fd(),
0,
)
};
if addr == libc::MAP_FAILED | {
return Err(io::Error::last_os_error());
} | conditional_block |
|
mod.rs | This typedef is generally used to avoid writing out io::Error directly and
/// is otherwise a direct mapping to Result.
pub type Result<T> = result::Result<T, io::Error>;
// Returns a `Vec<T>` with a size in bytes at least as large as `size_in_bytes`.
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
fn vec_with_size_in_bytes<T: Default>(size_in_bytes: usize) -> Vec<T> {
let rounded_size = (size_in_bytes + size_of::<T>() - 1) / size_of::<T>();
let mut v = Vec::with_capacity(rounded_size);
for _ in 0..rounded_size {
v.push(T::default())
}
v
}
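// The expression above is ceiling division: for size_in_bytes = 10 and
// size_of::<T>() = 4 it yields (10 + 3) / 4 = 3 elements, i.e. 12 bytes -- the
// smallest whole-element allocation of at least the requested size.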
// The kvm API has many structs that resemble the following `Foo` structure:
//
// ```
// #[repr(C)]
// struct Foo {
// some_data: u32
// entries: __IncompleteArrayField<__u32>,
// }
// ```
//
// In order to allocate such a structure, `size_of::<Foo>()` would be too small because it would not
// include any space for `entries`. To make the allocation large enough while still being aligned
// for `Foo`, a `Vec<Foo>` is created. Only the first element of `Vec<Foo>` would actually be used
// as a `Foo`. The remaining memory in the `Vec<Foo>` is for `entries`, which must be contiguous
// with `Foo`. This function is used to make the `Vec<Foo>` with enough space for `count` entries.
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
fn vec_with_array_field<T: Default, F>(count: usize) -> Vec<T> {
let element_space = count * size_of::<F>();
let vec_size_bytes = size_of::<T>() + element_space;
vec_with_size_in_bytes(vec_size_bytes)
}
/// Wrapper over the `kvm_cpuid2` structure.
///
/// The structure has a zero-length array at the end, hidden behind a bounds check.
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
pub struct CpuId {
// Wrapper over `kvm_cpuid2` from which we only use the first element.
kvm_cpuid: Vec<kvm_cpuid2>,
// Number of `kvm_cpuid_entry2` structs at the end of kvm_cpuid2.
allocated_len: usize,
}
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
impl Clone for CpuId {
fn clone(&self) -> Self {
let mut kvm_cpuid = Vec::with_capacity(self.kvm_cpuid.len());
for _ in 0..self.kvm_cpuid.len() {
kvm_cpuid.push(kvm_cpuid2::default());
}
let num_bytes = self.kvm_cpuid.len() * size_of::<kvm_cpuid2>();
let src_byte_slice =
unsafe { std::slice::from_raw_parts(self.kvm_cpuid.as_ptr() as *const u8, num_bytes) };
let dst_byte_slice =
unsafe { std::slice::from_raw_parts_mut(kvm_cpuid.as_mut_ptr() as *mut u8, num_bytes) };
dst_byte_slice.copy_from_slice(src_byte_slice);
CpuId {
kvm_cpuid,
allocated_len: self.allocated_len,
}
}
}
#[cfg(test)]
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
impl PartialEq for CpuId {
fn eq(&self, other: &CpuId) -> bool {
let entries: &[kvm_cpuid_entry2] =
unsafe { self.kvm_cpuid[0].entries.as_slice(self.allocated_len) };
let other_entries: &[kvm_cpuid_entry2] =
unsafe { other.kvm_cpuid[0].entries.as_slice(other.allocated_len) };
self.allocated_len == other.allocated_len && entries == other_entries
}
}
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
impl CpuId {
/// Creates a new `CpuId` structure that contains at most `array_len` KVM CPUID entries.
///
/// # Arguments
///
/// * `array_len` - Maximum number of CPUID entries.
///
/// # Example
///
/// ```
/// use kvm_ioctls::CpuId;
/// let cpu_id = CpuId::new(32);
/// ```
pub fn new(array_len: usize) -> CpuId {
let mut kvm_cpuid = vec_with_array_field::<kvm_cpuid2, kvm_cpuid_entry2>(array_len);
kvm_cpuid[0].nent = array_len as u32;
CpuId {
kvm_cpuid,
allocated_len: array_len,
}
}
/// Creates a new `CpuId` structure based on a supplied vector of `kvm_cpuid_entry2`.
///
/// # Arguments
///
/// * `entries` - The vector of `kvm_cpuid_entry2` entries.
///
/// # Example
///
/// ```rust
/// # extern crate kvm_ioctls;
/// extern crate kvm_bindings;
///
/// use kvm_bindings::kvm_cpuid_entry2;
/// use kvm_ioctls::CpuId;
/// // Create a Cpuid to hold one entry.
/// let mut cpuid = CpuId::new(1);
/// let mut entries = cpuid.mut_entries_slice().to_vec();
/// let new_entry = kvm_cpuid_entry2 {
/// function: 0x4,
/// index: 0,
/// flags: 1,
/// eax: 0b1100000,
/// ebx: 0,
/// ecx: 0,
/// edx: 0,
/// padding: [0, 0, 0],
/// };
/// entries.insert(0, new_entry);
/// cpuid = CpuId::from_entries(&entries);
/// ```
///
pub fn from_entries(entries: &[kvm_cpuid_entry2]) -> CpuId {
let mut kvm_cpuid = vec_with_array_field::<kvm_cpuid2, kvm_cpuid_entry2>(entries.len());
kvm_cpuid[0].nent = entries.len() as u32;
unsafe {
kvm_cpuid[0]
.entries
.as_mut_slice(entries.len())
.copy_from_slice(entries);
}
CpuId {
kvm_cpuid,
allocated_len: entries.len(),
}
}
/// Returns the mutable entries slice so they can be modified before passing to the VCPU.
///
/// # Example
/// ```rust
/// use kvm_ioctls::{CpuId, Kvm, MAX_KVM_CPUID_ENTRIES};
/// let kvm = Kvm::new().unwrap();
/// let mut cpuid = kvm.get_supported_cpuid(MAX_KVM_CPUID_ENTRIES).unwrap();
/// let cpuid_entries = cpuid.mut_entries_slice();
/// ```
///
pub fn mut_entries_slice(&mut self) -> &mut [kvm_cpuid_entry2] |
/// Get a pointer so it can be passed to the kernel. Using this pointer is unsafe.
///
pub fn as_ptr(&self) -> *const kvm_cpuid2 {
&self.kvm_cpuid[0]
}
/// Get a mutable pointer so it can be passed to the kernel. Using this pointer is unsafe.
///
pub fn as_mut_ptr(&mut self) -> *mut kvm_cpuid2 {
&mut self.kvm_cpuid[0]
}
}
/// Safe wrapper over the `kvm_run` struct.
///
/// The wrapper is needed for sending the pointer to `kvm_run` between
/// threads as raw pointers do not implement `Send` and `Sync`.
pub struct KvmRunWrapper {
kvm_run_ptr: *mut u8,
// This field is needed so we can `munmap` the memory mapped to hold `kvm_run`.
mmap_size: usize,
}
// Send and Sync aren't automatically implemented for the raw address pointer.
// Accessing that pointer is only done through the stateless interface which
// allows the object to be shared by multiple threads without a decrease in
// safety.
unsafe impl Send for KvmRunWrapper {}
unsafe impl Sync for KvmRunWrapper {}
impl KvmRunWrapper {
/// Maps the first `size` bytes of the given `fd`.
///
/// # Arguments
/// * `fd` - File descriptor to mmap from.
/// * `size | {
// Mapping the unsized array to a slice is unsafe because the length isn't known. Using
// the length we originally allocated with eliminates the possibility of overflow.
if self.kvm_cpuid[0].nent as usize > self.allocated_len {
self.kvm_cpuid[0].nent = self.allocated_len as u32;
}
let nent = self.kvm_cpuid[0].nent as usize;
unsafe { self.kvm_cpuid[0].entries.as_mut_slice(nent) }
} | identifier_body |
mod.rs | This typedef is generally used to avoid writing out io::Error directly and
/// is otherwise a direct mapping to Result.
pub type Result<T> = result::Result<T, io::Error>;
// Returns a `Vec<T>` with a size in bytes at least as large as `size_in_bytes`.
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
fn vec_with_size_in_bytes<T: Default>(size_in_bytes: usize) -> Vec<T> {
let rounded_size = (size_in_bytes + size_of::<T>() - 1) / size_of::<T>();
let mut v = Vec::with_capacity(rounded_size);
for _ in 0..rounded_size {
v.push(T::default())
}
v
}
// The kvm API has many structs that resemble the following `Foo` structure:
//
// ```
// #[repr(C)]
// struct Foo {
// some_data: u32
// entries: __IncompleteArrayField<__u32>,
// }
// ```
//
// In order to allocate such a structure, `size_of::<Foo>()` would be too small because it would not
// include any space for `entries`. To make the allocation large enough while still being aligned
// for `Foo`, a `Vec<Foo>` is created. Only the first element of `Vec<Foo>` would actually be used
// as a `Foo`. The remaining memory in the `Vec<Foo>` is for `entries`, which must be contiguous
// with `Foo`. This function is used to make the `Vec<Foo>` with enough space for `count` entries.
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
fn vec_with_array_field<T: Default, F>(count: usize) -> Vec<T> {
let element_space = count * size_of::<F>();
let vec_size_bytes = size_of::<T>() + element_space;
vec_with_size_in_bytes(vec_size_bytes)
}
/// Wrapper over the `kvm_cpuid2` structure.
///
/// The structure has a zero-length array at the end, hidden behind a bounds check.
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
pub struct CpuId {
// Wrapper over `kvm_cpuid2` from which we only use the first element.
kvm_cpuid: Vec<kvm_cpuid2>,
// Number of `kvm_cpuid_entry2` structs at the end of kvm_cpuid2.
allocated_len: usize,
}
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
impl Clone for CpuId {
fn clone(&self) -> Self {
let mut kvm_cpuid = Vec::with_capacity(self.kvm_cpuid.len());
for _ in 0..self.kvm_cpuid.len() {
kvm_cpuid.push(kvm_cpuid2::default());
}
let num_bytes = self.kvm_cpuid.len() * size_of::<kvm_cpuid2>();
let src_byte_slice =
unsafe { std::slice::from_raw_parts(self.kvm_cpuid.as_ptr() as *const u8, num_bytes) };
let dst_byte_slice =
unsafe { std::slice::from_raw_parts_mut(kvm_cpuid.as_mut_ptr() as *mut u8, num_bytes) };
dst_byte_slice.copy_from_slice(src_byte_slice);
CpuId {
kvm_cpuid,
allocated_len: self.allocated_len,
}
}
}
#[cfg(test)]
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
impl PartialEq for CpuId {
fn eq(&self, other: &CpuId) -> bool {
let entries: &[kvm_cpuid_entry2] =
unsafe { self.kvm_cpuid[0].entries.as_slice(self.allocated_len) };
let other_entries: &[kvm_cpuid_entry2] =
unsafe { other.kvm_cpuid[0].entries.as_slice(other.allocated_len) };
self.allocated_len == other.allocated_len && entries == other_entries
}
}
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
impl CpuId {
/// Creates a new `CpuId` structure that contains at most `array_len` KVM CPUID entries.
///
/// # Arguments
///
/// * `array_len` - Maximum number of CPUID entries.
///
/// # Example
///
/// ```
/// use kvm_ioctls::CpuId;
/// let cpu_id = CpuId::new(32);
/// ```
pub fn new(array_len: usize) -> CpuId {
let mut kvm_cpuid = vec_with_array_field::<kvm_cpuid2, kvm_cpuid_entry2>(array_len);
kvm_cpuid[0].nent = array_len as u32;
CpuId {
kvm_cpuid,
allocated_len: array_len,
}
}
/// Creates a new `CpuId` structure based on a supplied vector of `kvm_cpuid_entry2`.
///
/// # Arguments
///
/// * `entries` - The vector of `kvm_cpuid_entry2` entries.
///
/// # Example
///
/// ```rust
/// # extern crate kvm_ioctls;
/// extern crate kvm_bindings;
///
/// use kvm_bindings::kvm_cpuid_entry2;
/// use kvm_ioctls::CpuId;
/// // Create a Cpuid to hold one entry.
/// let mut cpuid = CpuId::new(1);
/// let mut entries = cpuid.mut_entries_slice().to_vec();
/// let new_entry = kvm_cpuid_entry2 {
/// function: 0x4,
/// index: 0,
/// flags: 1,
/// eax: 0b1100000,
/// ebx: 0,
/// ecx: 0,
/// edx: 0,
/// padding: [0, 0, 0],
/// };
/// entries.insert(0, new_entry);
/// cpuid = CpuId::from_entries(&entries);
/// ```
///
pub fn from_entries(entries: &[kvm_cpuid_entry2]) -> CpuId {
let mut kvm_cpuid = vec_with_array_field::<kvm_cpuid2, kvm_cpuid_entry2>(entries.len());
kvm_cpuid[0].nent = entries.len() as u32;
unsafe {
kvm_cpuid[0]
.entries
.as_mut_slice(entries.len())
.copy_from_slice(entries);
} |
CpuId {
kvm_cpuid,
allocated_len: entries.len(),
}
}
/// Returns the mutable entries slice so they can be modified before passing to the VCPU.
///
/// # Example
/// ```rust
/// use kvm_ioctls::{CpuId, Kvm, MAX_KVM_CPUID_ENTRIES};
/// let kvm = Kvm::new().unwrap();
/// let mut cpuid = kvm.get_supported_cpuid(MAX_KVM_CPUID_ENTRIES).unwrap();
/// let cpuid_entries = cpuid.mut_entries_slice();
/// ```
///
pub fn mut_entries_slice(&mut self) -> &mut [kvm_cpuid_entry2] {
// Mapping the unsized array to a slice is unsafe because the length isn't known. Using
// the length we originally allocated with eliminates the possibility of overflow.
if self.kvm_cpuid[0].nent as usize > self.allocated_len {
self.kvm_cpuid[0].nent = self.allocated_len as u32;
}
let nent = self.kvm_cpuid[0].nent as usize;
unsafe { self.kvm_cpuid[0].entries.as_mut_slice(nent) }
}
/// Get a pointer so it can be passed to the kernel. Using this pointer is unsafe.
///
pub fn as_ptr(&self) -> *const kvm_cpuid2 {
&self.kvm_cpuid[0]
}
/// Get a mutable pointer so it can be passed to the kernel. Using this pointer is unsafe.
///
pub fn as_mut_ptr(&mut self) -> *mut kvm_cpuid2 {
&mut self.kvm_cpuid[0]
}
}
/// Safe wrapper over the `kvm_run` struct.
///
/// The wrapper is needed for sending the pointer to `kvm_run` between
/// threads as raw pointers do not implement `Send` and `Sync`.
pub struct KvmRunWrapper {
kvm_run_ptr: *mut u8,
// This field is needed so we can `munmap` the memory mapped to hold `kvm_run`.
mmap_size: usize,
}
// Send and Sync aren't automatically implemented for the raw address pointer.
// Accessing that pointer is only done through the stateless interface which
// allows the object to be shared by multiple threads without a decrease in
// safety.
unsafe impl Send for KvmRunWrapper {}
unsafe impl Sync for KvmRunWrapper {}
impl KvmRunWrapper {
/// Maps the first `size` bytes of the given `fd`.
///
/// # Arguments
/// * `fd` - File descriptor to mmap from.
/// * `size` | random_line_split |
|
file_server.rs | use crate::file_watcher::FileWatcher;
use bytes::Bytes;
use futures::{stream, Future, Sink, Stream};
use glob::{glob, Pattern};
use std::collections::HashMap;
use std::fs;
use std::io::{Read, Seek, SeekFrom};
use std::path::PathBuf;
use std::sync::mpsc::RecvTimeoutError;
use std::time;
use tracing::field;
/// `FileServer` is a Source which cooperatively schedules reads over files,
/// converting the lines of said files into `LogLine` structures. As
/// `FileServer` is intended to be useful across multiple operating systems with
/// POSIX filesystem semantics, `FileServer` must poll for changes. That is, no
/// event notification is used by `FileServer`.
///
/// `FileServer` is configured on a path to watch. The files do _not_ need to
/// exist at cernan startup. `FileServer` will discover new files which match
/// its path in at most 60 seconds.
pub struct FileServer {
pub include: Vec<PathBuf>,
pub exclude: Vec<PathBuf>,
pub max_read_bytes: usize,
pub start_at_beginning: bool,
pub ignore_before: Option<time::SystemTime>,
pub max_line_bytes: usize,
pub fingerprint_bytes: usize,
pub ignored_header_bytes: usize,
}
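// A minimal configuration sketch (all values illustrative, not defaults):
//
// let server = FileServer {
//     include: vec![PathBuf::from("/var/log/*.log")],
//     exclude: vec![PathBuf::from("/var/log/noisy.log")],
//     max_read_bytes: 2048,
//     start_at_beginning: false,
//     ignore_before: None,
//     max_line_bytes: 100 * 1024,
//     fingerprint_bytes: 256,
//     ignored_header_bytes: 0,
// };
// // `server.run(chans, shutdown_rx)` then polls matching files until the
// // shutdown channel disconnects.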
type FileFingerprint = u64;
/// `FileServer` as Source
///
/// The 'run' of `FileServer` performs the cooperative scheduling of reads over
/// `FileServer`'s configured files. Much care has been taken to make this
/// scheduling 'fair', meaning busy files do not drown out quiet files or vice
/// versa, but there's no one perfect approach. Very fast files _will_ be lost if
/// your system aggressively rolls log files. `FileServer` will keep a file
/// handle open, but should your system move so quickly that a file disappears
/// before cernan is able to open it, the contents will be lost. This should be a
/// rare occurrence.
///
/// Specific operating systems support evented interfaces that correct this
/// problem but your intrepid authors know of no generic solution.
impl FileServer {
pub fn run(
self,
mut chans: impl Sink<SinkItem = (Bytes, String), SinkError = ()>,
shutdown: std::sync::mpsc::Receiver<()>,
) {
let mut line_buffer = Vec::new();
let mut fingerprint_buffer = Vec::new();
let mut fp_map: HashMap<FileFingerprint, FileWatcher> = Default::default();
let mut backoff_cap: usize = 1;
let mut lines = Vec::new();
let mut start_of_run = true;
// Alright friends, how does this work?
//
// We want to avoid burning up users' CPUs. To do this we sleep after
// reading lines out of files. But! We want to be responsive as well. We
// keep track of a 'backoff_cap' to decide how long we'll wait in any
// given loop. This cap grows each time we fail to read lines in an
// exponential fashion to some hard-coded cap.
loop {
let mut global_bytes_read: usize = 0;
// glob poll
let exclude_patterns = self
.exclude
.iter()
.map(|e| Pattern::new(e.to_str().expect("no ability to glob")).unwrap())
.collect::<Vec<_>>();
for (_file_id, watcher) in &mut fp_map {
watcher.set_file_findable(false); // assume not findable until found
}
for include_pattern in &self.include {
for entry in glob(include_pattern.to_str().expect("no ability to glob"))
.expect("Failed to read glob pattern")
{
if let Ok(path) = entry {
if exclude_patterns
.iter()
.any(|e| e.matches(path.to_str().unwrap()))
{
continue;
}
if let Some(file_id) =
self.get_fingerprint_of_file(&path, &mut fingerprint_buffer)
{
if let Some(watcher) = fp_map.get_mut(&file_id) {
// file fingerprint matches a watched file
let was_found_this_cycle = watcher.file_findable();
watcher.set_file_findable(true);
if watcher.path == path {
trace!(
message = "Continue watching file.",
path = field::debug(&path),
);
} else {
// matches a file with a different path
if !was_found_this_cycle {
info!(
message = "Watched file has been renamed.",
path = field::debug(&path),
old_path = field::debug(&watcher.path)
);
watcher.update_path(path).ok(); // ok if this fails: might fix next cycle
} else {
info!(
message = "More than one file has same fingerprint.",
path = field::debug(&path),
old_path = field::debug(&watcher.path)
);
let (old_path, new_path) = (&watcher.path, &path);
if let (Ok(old_modified_time), Ok(new_modified_time)) = (
fs::metadata(&old_path).and_then(|m| m.modified()),
fs::metadata(&new_path).and_then(|m| m.modified()),
) {
if old_modified_time < new_modified_time {
info!(
message = "Switching to watch most recently modified file.",
new_modified_time = field::debug(&new_modified_time),
old_modified_time = field::debug(&old_modified_time),
);
watcher.update_path(path).ok(); // ok if this fails: might fix next cycle
}
}
}
}
} else {
// unknown (new) file fingerprint
let read_file_from_beginning = if start_of_run {
self.start_at_beginning
} else {
true
};
if let Ok(mut watcher) = FileWatcher::new(
path,
read_file_from_beginning,
self.ignore_before,
) {
info!(
message = "Found file to watch.",
path = field::debug(&watcher.path),
start_at_beginning = field::debug(&self.start_at_beginning),
start_of_run = field::debug(&start_of_run),
);
watcher.set_file_findable(true);
fp_map.insert(file_id, watcher);
};
}
}
}
}
}
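// After the glob pass, every watcher's findability flag is current: an existing
// fingerprint either kept its path, followed a rename, or lost a tie-break to a
// more recently modified duplicate, and unseen fingerprints gained new watchers.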
// line polling
for (_file_id, watcher) in &mut fp_map {
let mut bytes_read: usize = 0;
while let Ok(sz) = watcher.read_line(&mut line_buffer, self.max_line_bytes) {
if sz > 0 {
trace!(
message = "Read bytes.",
path = field::debug(&watcher.path),
bytes = field::debug(sz)
);
bytes_read += sz;
if !line_buffer.is_empty() {
lines.push((
line_buffer.clone().into(),
watcher.path.to_str().expect("not a valid path").to_owned(),
));
line_buffer.clear();
}
} else {
break;
}
if bytes_read > self.max_read_bytes {
break;
}
}
global_bytes_read = global_bytes_read.saturating_add(bytes_read);
}
// A FileWatcher is dead when the underlying file has disappeared.
// If the FileWatcher is dead we don't retain it; it will be deallocated.
fp_map.retain(|_file_id, watcher| !watcher.dead());
match stream::iter_ok::<_, ()>(lines.drain(..))
.forward(chans)
.wait()
{
Ok((_, sink)) => chans = sink,
Err(_) => unreachable!("Output channel is closed"),
}
// When no lines have been read we kick the backup_cap up by twice,
// limited by the hard-coded cap. Else, we set the backup_cap to its
// minimum on the assumption that next time through there will be
// more lines to read promptly.
if global_bytes_read == 0 {
let lim = backoff_cap.saturating_mul(2);
if lim > 2_048 {
backoff_cap = 2_048;
} else {
backoff_cap = lim;
}
} else {
backoff_cap = 1;
}
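// Net effect: idle passes sleep 1, 2, 4, ... up to 2_048 ms between polls, while
// a productive pass resets the cap, and the subtraction below shortens the wait
// further by however many bytes were just read.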
let backoff = backoff_cap.saturating_sub(global_bytes_read);
match shutdown.recv_timeout(time::Duration::from_millis(backoff as u64)) {
Ok(()) => unreachable!(), // The sender should never actually send
Err(RecvTimeoutError::Timeout) => {}
Err(RecvTimeoutError::Disconnected) => return,
}
start_of_run = false;
}
}
fn | (
&self,
path: &PathBuf,
buffer: &mut Vec<u8>,
) -> Option<FileFingerprint> {
let i = self.ignored_header_bytes as u64;
let b = self.fingerprint_bytes;
buffer.resize(b, 0u8);
if let Ok(mut fp) = fs::File::open(path) {
if fp.seek(SeekFrom::Start(i)).is_ok() && fp.read_exact(&mut buffer[..b]).is_ok() {
let fingerprint = crc::crc64::checksum_ecma(&buffer[..b]);
Some(fingerprint)
} else {
None
}
} else {
None
}
}
}
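// The fingerprint is a CRC-64/ECMA over the first `fingerprint_bytes` of content
// after skipping `ignored_header_bytes`, so a file keeps its identity across
// renames (e.g. log rotation) as long as its leading bytes are unchanged. Files
// that genuinely share those bytes collide; the "same fingerprint" branch in
// `run` above is written to tolerate that.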
| get_fingerprint_of_file | identifier_name |
file_server.rs | use crate::file_watcher::FileWatcher;
use bytes::Bytes;
use futures::{stream, Future, Sink, Stream};
use glob::{glob, Pattern};
use std::collections::HashMap;
use std::fs;
use std::io::{Read, Seek, SeekFrom};
use std::path::PathBuf;
use std::sync::mpsc::RecvTimeoutError;
use std::time;
use tracing::field;
/// `FileServer` is a Source which cooperatively schedules reads over files,
/// converting the lines of said files into `LogLine` structures. As
/// `FileServer` is intended to be useful across multiple operating systems with
/// POSIX filesystem semantics, `FileServer` must poll for changes. That is, no
/// event notification is used by `FileServer`.
///
/// `FileServer` is configured on a path to watch. The files do _not_ need to
/// exist at cernan startup. `FileServer` will discover new files which match
/// its path in at most 60 seconds.
pub struct FileServer {
pub include: Vec<PathBuf>,
pub exclude: Vec<PathBuf>,
pub max_read_bytes: usize,
pub start_at_beginning: bool,
pub ignore_before: Option<time::SystemTime>,
pub max_line_bytes: usize,
pub fingerprint_bytes: usize,
pub ignored_header_bytes: usize,
}
type FileFingerprint = u64;
/// `FileServer` as Source
///
/// The 'run' of `FileServer` performs the cooperative scheduling of reads over
/// `FileServer`'s configured files. Much care has been taken to make this
/// scheduling 'fair', meaning busy files do not drown out quiet files or vice
/// versa, but there's no one perfect approach. Very fast files _will_ be lost if
/// your system aggressively rolls log files. `FileServer` will keep a file
/// handle open, but should your system move so quickly that a file disappears
/// before cernan is able to open it, the contents will be lost. This should be a
/// rare occurrence.
///
/// Specific operating systems support evented interfaces that correct this
/// problem but your intrepid authors know of no generic solution.
impl FileServer {
pub fn run(
self,
mut chans: impl Sink<SinkItem = (Bytes, String), SinkError = ()>,
shutdown: std::sync::mpsc::Receiver<()>,
) | .exclude
.iter()
.map(|e| Pattern::new(e.to_str().expect("no ability to glob")).unwrap())
.collect::<Vec<_>>();
for (_file_id, watcher) in &mut fp_map {
watcher.set_file_findable(false); // assume not findable until found
}
for include_pattern in &self.include {
for entry in glob(include_pattern.to_str().expect("no ability to glob"))
.expect("Failed to read glob pattern")
{
if let Ok(path) = entry {
if exclude_patterns
.iter()
.any(|e| e.matches(path.to_str().unwrap()))
{
continue;
}
if let Some(file_id) =
self.get_fingerprint_of_file(&path, &mut fingerprint_buffer)
{
if let Some(watcher) = fp_map.get_mut(&file_id) {
// file fingerprint matches a watched file
let was_found_this_cycle = watcher.file_findable();
watcher.set_file_findable(true);
if watcher.path == path {
trace!(
message = "Continue watching file.",
path = field::debug(&path),
);
} else {
// matches a file with a different path
if !was_found_this_cycle {
info!(
message = "Watched file has been renamed.",
path = field::debug(&path),
old_path = field::debug(&watcher.path)
);
watcher.update_path(path).ok(); // ok if this fails: might fix next cycle
} else {
info!(
message = "More than one file has same fingerprint.",
path = field::debug(&path),
old_path = field::debug(&watcher.path)
);
let (old_path, new_path) = (&watcher.path, &path);
if let (Ok(old_modified_time), Ok(new_modified_time)) = (
fs::metadata(&old_path).and_then(|m| m.modified()),
fs::metadata(&new_path).and_then(|m| m.modified()),
) {
if old_modified_time < new_modified_time {
info!(
message = "Switching to watch most recently modified file.",
new_modified_time = field::debug(&new_modified_time),
old_modified_time = field::debug(&old_modified_time),
);
watcher.update_path(path).ok(); // ok if this fails: might fix next cycle
}
}
}
}
} else {
// unknown (new) file fingerprint
let read_file_from_beginning = if start_of_run {
self.start_at_beginning
} else {
true
};
if let Ok(mut watcher) = FileWatcher::new(
path,
read_file_from_beginning,
self.ignore_before,
) {
info!(
message = "Found file to watch.",
path = field::debug(&watcher.path),
start_at_beginning = field::debug(&self.start_at_beginning),
start_of_run = field::debug(&start_of_run),
);
watcher.set_file_findable(true);
fp_map.insert(file_id, watcher);
};
}
}
}
}
}
// line polling
for (_file_id, watcher) in &mut fp_map {
let mut bytes_read: usize = 0;
while let Ok(sz) = watcher.read_line(&mut line_buffer, self.max_line_bytes) {
if sz > 0 {
trace!(
message = "Read bytes.",
path = field::debug(&watcher.path),
bytes = field::debug(sz)
);
bytes_read += sz;
if !line_buffer.is_empty() {
lines.push((
line_buffer.clone().into(),
watcher.path.to_str().expect("not a valid path").to_owned(),
));
line_buffer.clear();
}
} else {
break;
}
if bytes_read > self.max_read_bytes {
break;
}
}
global_bytes_read = global_bytes_read.saturating_add(bytes_read);
}
// A FileWatcher is dead when the underlying file has disappeared.
// If the FileWatcher is dead we don't retain it; it will be deallocated.
fp_map.retain(|_file_id, watcher| !watcher.dead());
match stream::iter_ok::<_, ()>(lines.drain(..))
.forward(chans)
.wait()
{
Ok((_, sink)) => chans = sink,
Err(_) => unreachable!("Output channel is closed"),
}
// When no lines have been read we kick the backup_cap up by twice,
// limited by the hard-coded cap. Else, we set the backup_cap to its
// minimum on the assumption that next time through there will be
// more lines to read promptly.
if global_bytes_read == 0 {
let lim = backoff_cap.saturating_mul(2);
if lim > 2_048 {
backoff_cap = 2_048;
} else {
backoff_cap = lim;
}
} else {
backoff_cap = 1;
}
let backoff = backoff_cap.saturating_sub(global_bytes_read);
match shutdown.recv_timeout(time::Duration::from_millis(backoff as u64)) {
Ok(()) => unreachable!(), // The sender should never actually send
Err(RecvTimeoutError::Timeout) => {}
Err(RecvTimeoutError::Disconnected) => return,
}
start_of_run = false;
}
}
fn get_fingerprint_of_file(
&self,
path: &PathBuf,
buffer: &mut Vec<u8>,
) -> Option<FileFingerprint> {
let i = self.ignored_header_bytes as u64;
let b = self.fingerprint_bytes;
buffer.resize(b, 0u8);
if let Ok(mut fp) = fs::File::open(path) {
if fp.seek(SeekFrom::Start(i)).is_ok() && fp.read_exact(&mut buffer[..b]).is_ok() {
let fingerprint = crc::crc64::checksum_ecma(&buffer[..b]);
Some(fingerprint)
} else {
None
}
} else {
None
}
}
}
| {
let mut line_buffer = Vec::new();
let mut fingerprint_buffer = Vec::new();
let mut fp_map: HashMap<FileFingerprint, FileWatcher> = Default::default();
let mut backoff_cap: usize = 1;
let mut lines = Vec::new();
let mut start_of_run = true;
// Alright friends, how does this work?
//
// We want to avoid burning up users' CPUs. To do this we sleep after
// reading lines out of files. But! We want to be responsive as well. We
// keep track of a 'backoff_cap' to decide how long we'll wait in any
// given loop. This cap grows each time we fail to read lines in an
// exponential fashion to some hard-coded cap.
loop {
let mut global_bytes_read: usize = 0;
// glob poll
let exclude_patterns = self | identifier_body |
file_server.rs | use crate::file_watcher::FileWatcher;
use bytes::Bytes;
use futures::{stream, Future, Sink, Stream};
use glob::{glob, Pattern};
use std::collections::HashMap;
use std::fs;
use std::io::{Read, Seek, SeekFrom};
use std::path::PathBuf;
use std::sync::mpsc::RecvTimeoutError;
use std::time;
use tracing::field;
/// `FileServer` is a Source which cooperatively schedules reads over files,
/// converting the lines of said files into `LogLine` structures. As
/// `FileServer` is intended to be useful across multiple operating systems with
/// POSIX filesystem semantics, `FileServer` must poll for changes. That is, no
/// event notification is used by `FileServer`.
///
/// `FileServer` is configured on a path to watch. The files do _not_ need to
/// exist at cernan startup. `FileServer` will discover new files which match
/// its path in at most 60 seconds.
pub struct FileServer {
pub include: Vec<PathBuf>,
pub exclude: Vec<PathBuf>,
pub max_read_bytes: usize,
pub start_at_beginning: bool,
pub ignore_before: Option<time::SystemTime>,
pub max_line_bytes: usize,
pub fingerprint_bytes: usize,
pub ignored_header_bytes: usize,
}
type FileFingerprint = u64;
/// `FileServer` as Source
///
/// The 'run' of `FileServer` performs the cooperative scheduling of reads over
/// `FileServer`'s configured files. Much care has been taken to make this
/// scheduling 'fair', meaning busy files do not drown out quiet files or vice
/// versa, but there's no one perfect approach. Very fast files _will_ be lost if
/// your system aggressively rolls log files. `FileServer` will keep a file
/// handle open, but should your system move so quickly that a file disappears
/// before cernan is able to open it, the contents will be lost. This should be a
/// rare occurrence.
///
/// Specific operating systems support evented interfaces that correct this
/// problem but your intrepid authors know of no generic solution.
impl FileServer {
pub fn run(
self,
mut chans: impl Sink<SinkItem = (Bytes, String), SinkError = ()>,
shutdown: std::sync::mpsc::Receiver<()>,
) {
let mut line_buffer = Vec::new();
let mut fingerprint_buffer = Vec::new();
let mut fp_map: HashMap<FileFingerprint, FileWatcher> = Default::default();
let mut backoff_cap: usize = 1;
let mut lines = Vec::new();
let mut start_of_run = true;
// Alright friends, how does this work?
//
// We want to avoid burning up users' CPUs. To do this we sleep after
// reading lines out of files. But! We want to be responsive as well. We
// keep track of a 'backoff_cap' to decide how long we'll wait in any
// given loop. This cap grows each time we fail to read lines in an
// exponential fashion to some hard-coded cap.
loop {
let mut global_bytes_read: usize = 0;
// glob poll
let exclude_patterns = self
.exclude
.iter()
.map(|e| Pattern::new(e.to_str().expect("no ability to glob")).unwrap())
.collect::<Vec<_>>();
for (_file_id, watcher) in &mut fp_map {
watcher.set_file_findable(false); // assume not findable until found
}
for include_pattern in &self.include {
for entry in glob(include_pattern.to_str().expect("no ability to glob"))
.expect("Failed to read glob pattern")
{
if let Ok(path) = entry {
if exclude_patterns
.iter()
.any(|e| e.matches(path.to_str().unwrap()))
{
continue;
}
if let Some(file_id) =
self.get_fingerprint_of_file(&path, &mut fingerprint_buffer)
{
if let Some(watcher) = fp_map.get_mut(&file_id) {
// file fingerprint matches a watched file
let was_found_this_cycle = watcher.file_findable();
watcher.set_file_findable(true);
if watcher.path == path {
trace!(
message = "Continue watching file.",
path = field::debug(&path),
);
} else | if old_modified_time < new_modified_time {
info!(
message = "Switching to watch most recently modified file.",
new_modified_time = field::debug(&new_modified_time),
old_modified_time = field::debug(&old_modified_time),
);
watcher.update_path(path).ok(); // ok if this fails: might fix next cycle
}
}
}
}
} else {
// unknown (new) file fingerprint
let read_file_from_beginning = if start_of_run {
self.start_at_beginning
} else {
true
};
if let Ok(mut watcher) = FileWatcher::new(
path,
read_file_from_beginning,
self.ignore_before,
) {
info!(
message = "Found file to watch.",
path = field::debug(&watcher.path),
start_at_beginning = field::debug(&self.start_at_beginning),
start_of_run = field::debug(&start_of_run),
);
watcher.set_file_findable(true);
fp_map.insert(file_id, watcher);
};
}
}
}
}
}
// line polling
for (_file_id, watcher) in &mut fp_map {
let mut bytes_read: usize = 0;
while let Ok(sz) = watcher.read_line(&mut line_buffer, self.max_line_bytes) {
if sz > 0 {
trace!(
message = "Read bytes.",
path = field::debug(&watcher.path),
bytes = field::debug(sz)
);
bytes_read += sz;
if !line_buffer.is_empty() {
lines.push((
line_buffer.clone().into(),
watcher.path.to_str().expect("not a valid path").to_owned(),
));
line_buffer.clear();
}
} else {
break;
}
if bytes_read > self.max_read_bytes {
break;
}
}
global_bytes_read = global_bytes_read.saturating_add(bytes_read);
}
// A FileWatcher is dead when the underlying file has disappeared.
// If the FileWatcher is dead we don't retain it; it will be deallocated.
fp_map.retain(|_file_id, watcher| !watcher.dead());
match stream::iter_ok::<_, ()>(lines.drain(..))
.forward(chans)
.wait()
{
Ok((_, sink)) => chans = sink,
Err(_) => unreachable!("Output channel is closed"),
}
// When no lines have been read we kick the backup_cap up by twice,
// limited by the hard-coded cap. Else, we set the backup_cap to its
// minimum on the assumption that next time through there will be
// more lines to read promptly.
if global_bytes_read == 0 {
let lim = backoff_cap.saturating_mul(2);
if lim > 2_048 {
backoff_cap = 2_048;
} else {
backoff_cap = lim;
}
} else {
backoff_cap = 1;
}
let backoff = backoff_cap.saturating_sub(global_bytes_read);
match shutdown.recv_timeout(time::Duration::from_millis(backoff as u64)) {
Ok(()) => unreachable!(), // The sender should never actually send
Err(RecvTimeoutError::Timeout) => {}
Err(RecvTimeoutError::Disconnected) => return,
}
start_of_run = false;
}
}
fn get_fingerprint_of_file(
&self,
path: &PathBuf,
buffer: &mut Vec<u8>,
) -> Option<FileFingerprint> {
let i = self.ignored_header_bytes as u64;
let b = self.fingerprint_bytes;
buffer.resize(b, 0u8);
if let Ok(mut fp) = fs::File::open(path) {
if fp.seek(SeekFrom::Start(i)).is_ok() && fp.read_exact(&mut buffer[..b]).is_ok() {
let fingerprint = crc::crc64::checksum_ecma(&buffer[..b]);
Some(fingerprint)
} else {
None
}
} else {
None
}
}
}
| {
// matches a file with a different path
if !was_found_this_cycle {
info!(
message = "Watched file has been renamed.",
path = field::debug(&path),
old_path = field::debug(&watcher.path)
);
watcher.update_path(path).ok(); // ok if this fails: might fix next cycle
} else {
info!(
message = "More than one file has same fingerprint.",
path = field::debug(&path),
old_path = field::debug(&watcher.path)
);
let (old_path, new_path) = (&watcher.path, &path);
if let (Ok(old_modified_time), Ok(new_modified_time)) = (
fs::metadata(&old_path).and_then(|m| m.modified()),
fs::metadata(&new_path).and_then(|m| m.modified()),
) { | conditional_block |
file_server.rs | use crate::file_watcher::FileWatcher;
use bytes::Bytes;
use futures::{stream, Future, Sink, Stream};
use glob::{glob, Pattern};
use std::collections::HashMap;
use std::fs;
use std::io::{Read, Seek, SeekFrom};
use std::path::PathBuf;
use std::sync::mpsc::RecvTimeoutError;
use std::time;
use tracing::field;
/// `FileServer` is a Source which cooperatively schedules reads over files,
/// converting the lines of said files into `LogLine` structures. As
/// `FileServer` is intended to be useful across multiple operating systems with
/// POSIX filesystem semantics, `FileServer` must poll for changes. That is, no
/// event notification is used by `FileServer`.
///
/// `FileServer` is configured on a path to watch. The files do _not_ need to
/// exist at cernan startup. `FileServer` will discover new files which match
/// its path in at most 60 seconds.
pub struct FileServer {
pub include: Vec<PathBuf>,
pub exclude: Vec<PathBuf>,
pub max_read_bytes: usize,
pub start_at_beginning: bool,
pub ignore_before: Option<time::SystemTime>,
pub max_line_bytes: usize,
pub fingerprint_bytes: usize,
pub ignored_header_bytes: usize,
}
type FileFingerprint = u64;
/// `FileServer` as Source
///
/// The 'run' of `FileServer` performs the cooperative scheduling of reads over
/// `FileServer`'s configured files. Much care has been taken to make this
/// scheduling 'fair', meaning busy files do not drown out quiet files or vice
/// versa, but there's no one perfect approach. Very fast files _will_ be lost if
/// your system aggressively rolls log files. `FileServer` will keep a file
/// handle open, but should your system move so quickly that a file disappears
/// before cernan is able to open it, the contents will be lost. This should be a
/// rare occurrence.
///
/// Specific operating systems support evented interfaces that correct this
/// problem but your intrepid authors know of no generic solution.
impl FileServer {
pub fn run(
self,
mut chans: impl Sink<SinkItem = (Bytes, String), SinkError = ()>,
shutdown: std::sync::mpsc::Receiver<()>,
) {
let mut line_buffer = Vec::new();
let mut fingerprint_buffer = Vec::new();
let mut fp_map: HashMap<FileFingerprint, FileWatcher> = Default::default();
| let mut start_of_run = true;
// Alright friends, how does this work?
//
// We want to avoid burning up users' CPUs. To do this we sleep after
// reading lines out of files. But! We want to be responsive as well. We
// keep track of a 'backoff_cap' to decide how long we'll wait in any
// given loop. This cap grows each time we fail to read lines in an
// exponential fashion to some hard-coded cap.
loop {
let mut global_bytes_read: usize = 0;
// glob poll
let exclude_patterns = self
.exclude
.iter()
.map(|e| Pattern::new(e.to_str().expect("no ability to glob")).unwrap())
.collect::<Vec<_>>();
for (_file_id, watcher) in &mut fp_map {
watcher.set_file_findable(false); // assume not findable until found
}
for include_pattern in &self.include {
for entry in glob(include_pattern.to_str().expect("no ability to glob"))
.expect("Failed to read glob pattern")
{
if let Ok(path) = entry {
if exclude_patterns
.iter()
.any(|e| e.matches(path.to_str().unwrap()))
{
continue;
}
if let Some(file_id) =
self.get_fingerprint_of_file(&path, &mut fingerprint_buffer)
{
if let Some(watcher) = fp_map.get_mut(&file_id) {
// file fingerprint matches a watched file
let was_found_this_cycle = watcher.file_findable();
watcher.set_file_findable(true);
if watcher.path == path {
trace!(
message = "Continue watching file.",
path = field::debug(&path),
);
} else {
// matches a file with a different path
if !was_found_this_cycle {
info!(
message = "Watched file has been renamed.",
path = field::debug(&path),
old_path = field::debug(&watcher.path)
);
watcher.update_path(path).ok(); // ok if this fails: might fix next cycle
} else {
info!(
message = "More than one file has same fingerprint.",
path = field::debug(&path),
old_path = field::debug(&watcher.path)
);
let (old_path, new_path) = (&watcher.path, &path);
if let (Ok(old_modified_time), Ok(new_modified_time)) = (
fs::metadata(&old_path).and_then(|m| m.modified()),
fs::metadata(&new_path).and_then(|m| m.modified()),
) {
if old_modified_time < new_modified_time {
info!(
message = "Switching to watch most recently modified file.",
new_modified_time = field::debug(&new_modified_time),
old_modified_time = field::debug(&old_modified_time),
);
watcher.update_path(path).ok(); // ok if this fails: might fix next cycle
}
}
}
}
} else {
// unknown (new) file fingerprint
let read_file_from_beginning = if start_of_run {
self.start_at_beginning
} else {
true
};
if let Ok(mut watcher) = FileWatcher::new(
path,
read_file_from_beginning,
self.ignore_before,
) {
info!(
message = "Found file to watch.",
path = field::debug(&watcher.path),
start_at_beginning = field::debug(&self.start_at_beginning),
start_of_run = field::debug(&start_of_run),
);
watcher.set_file_findable(true);
fp_map.insert(file_id, watcher);
};
}
}
}
}
}
// line polling
for (_file_id, watcher) in &mut fp_map {
let mut bytes_read: usize = 0;
while let Ok(sz) = watcher.read_line(&mut line_buffer, self.max_line_bytes) {
if sz > 0 {
trace!(
message = "Read bytes.",
path = field::debug(&watcher.path),
bytes = field::debug(sz)
);
bytes_read += sz;
if !line_buffer.is_empty() {
lines.push((
line_buffer.clone().into(),
watcher.path.to_str().expect("not a valid path").to_owned(),
));
line_buffer.clear();
}
} else {
break;
}
if bytes_read > self.max_read_bytes {
break;
}
}
global_bytes_read = global_bytes_read.saturating_add(bytes_read);
}
// A FileWatcher is dead when the underlying file has disappeared.
// If the FileWatcher is dead we don't retain it; it will be deallocated.
fp_map.retain(|_file_id, watcher| !watcher.dead());
match stream::iter_ok::<_, ()>(lines.drain(..))
.forward(chans)
.wait()
{
Ok((_, sink)) => chans = sink,
Err(_) => unreachable!("Output channel is closed"),
}
// When no lines have been read we kick the backup_cap up by twice,
// limited by the hard-coded cap. Else, we set the backup_cap to its
// minimum on the assumption that next time through there will be
// more lines to read promptly.
if global_bytes_read == 0 {
let lim = backoff_cap.saturating_mul(2);
if lim > 2_048 {
backoff_cap = 2_048;
} else {
backoff_cap = lim;
}
} else {
backoff_cap = 1;
}
let backoff = backoff_cap.saturating_sub(global_bytes_read);
match shutdown.recv_timeout(time::Duration::from_millis(backoff as u64)) {
Ok(()) => unreachable!(), // The sender should never actually send
Err(RecvTimeoutError::Timeout) => {}
Err(RecvTimeoutError::Disconnected) => return,
}
start_of_run = false;
}
}
fn get_fingerprint_of_file(
&self,
path: &PathBuf,
buffer: &mut Vec<u8>,
) -> Option<FileFingerprint> {
let i = self.ignored_header_bytes as u64;
let b = self.fingerprint_bytes;
buffer.resize(b, 0u8);
if let Ok(mut fp) = fs::File::open(path) {
if fp.seek(SeekFrom::Start(i)).is_ok() && fp.read_exact(&mut buffer[..b]).is_ok() {
let fingerprint = crc::crc64::checksum_ecma(&buffer[..b]);
Some(fingerprint)
} else {
None
}
} else {
None
}
}
} | let mut backoff_cap: usize = 1;
let mut lines = Vec::new(); | random_line_split |
BAT.py | _a3, key=atom_mass, reverse=True):
# Find the last atom
attached_to_a4 = sorted([a for a in a3.bondedTo() \
if (a in selected) and (a!=a2)], key=atom_name)
for a4 in sorted(attached_to_a4, key=atom_mass, reverse=True):
return (a1, a2, a3, a4)
print 'Selected atoms:', selected
raise Exception('No new dihedral angle found!')
# Construct a list of torsion angles
torsionL = []
selected = [a for a in root]
while len(selected) < self.universe.numberOfAtoms():
(a1, a2, a3, a4) = _find_dihedral(selected)
torsionL.append((a1, a2, a3, a4))
selected.append(a1)
# If _firstTorsionTInd is not equal to the list index,
# then the dihedrals will likely be correlated and it is more appropriate
# to use a relative phase angle
prior_atoms = [
sorted([a2.index, a3.index]) for (a1, a2, a3, a4) in torsionL
]
self.rootInd = [r.index for r in root]
self._torsionIndL = [[a.index for a in tset] for tset in torsionL]
self._firstTorsionTInd = [prior_atoms.index(prior_atoms[n]) \
for n in range(len(prior_atoms))]
self.ntorsions = self.natoms - 3
def getFirstTorsionInds(self, extended):
"""
Indices of the first torsions in the BAT array
"""
offset = 6 if extended else 0
torsionInds = np.array(range(offset + 5, self.natoms * 3, 3))
primaryTorsions = sorted(list(set(self._firstTorsionTInd)))
return list(torsionInds[primaryTorsions])
def BAT(self, XYZ, extended=False):
"""
Conversion from Cartesian to Bond-Angle-Torsion coordinates
:param extended: whether to include external coordinates or not
:param XYZ: Cartesian coordinates with shape (natoms, 3)
"""
root = [distance(XYZ[self.rootInd[0]],XYZ[self.rootInd[1]]),\
distance(XYZ[self.rootInd[1]],XYZ[self.rootInd[2]]),\
angle(XYZ[self.rootInd[0]],XYZ[self.rootInd[1]],XYZ[self.rootInd[2]])]
import itertools
internal = root + \
[val for val in itertools.chain.from_iterable([\
BAT4(XYZ[a1],XYZ[a2],XYZ[a3],XYZ[a4]) \
for (a1,a2,a3,a4) in self._torsionIndL])]
torsions = internal[5::3]
phase_torsions = [(torsions[n] - torsions[self._firstTorsionTInd[n]]) \
if self._firstTorsionTInd[n]!=n else torsions[n] \
for n in range(len(torsions))]
internal[5::3] = phase_torsions
if not extended:
return np.array(internal)
external = self.extended_coordinates(XYZ[self.rootInd[0]], \
XYZ[self.rootInd[1]], XYZ[self.rootInd[2]])
return np.array(list(external) + list(internal))
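# Layout of the returned internal array: [r01, r12, theta012, b, a, t, b, a, t,
# ...] -- three root coordinates, then one (bond, angle, torsion) triple per
# remaining atom, in torsion-list order, for 3*natoms - 6 values. With
# extended=True the six external coordinates [x, y, z, phi, theta, omega] are
# prepended, giving 3*natoms values. A torsion whose central (a2, a3) bond
# repeats an earlier torsion's is stored as a phase difference relative to that
# first torsion, which decorrelates dihedrals sharing an axis.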
def extended_coordinates(self, p1, p2, p3):
# The rotation axis is a normalized vector pointing from atom 0 to 1
# It is described in two degrees of freedom by the polar angle and azimuth
e = normalize(p2 - p1)
phi = np.arctan2(e[1], e[0]) # Polar angle
theta = np.arccos(e[2]) # Azimuthal angle
# Rotation to the z axis
cp = np.cos(phi)
sp = np.sin(phi)
ct = np.cos(theta)
st = np.sin(theta)
Rz = np.array([[cp * ct, ct * sp, -st], [-sp, cp, 0],
[cp * st, sp * st, ct]])
pos2 = Rz.dot(np.array(p3 - p2))
# Angle about the rotation axis
omega = np.arctan2(pos2[1], pos2[0])
return np.array(list(p1) + [phi, theta, omega])
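# The six external coordinates are the Cartesian position of the first root atom
# plus three angles: phi and theta are the spherical angles of the unit vector
# e = (p2 - p1)/|p2 - p1|, i.e. e = [sin(theta)*cos(phi), sin(theta)*sin(phi),
# cos(theta)], and omega is the azimuth of the third atom about that axis,
# measured after Rz rotates e onto the z axis.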
def Cartesian(self, BAT):
"""
Conversion from (internal or extended) Bond-Angle-Torsion
to Cartesian coordinates
"""
# Arrange BAT coordinates in convenient arrays
offset = 6 if len(BAT) == (3 * self.natoms) else 0
bonds = BAT[offset + 3::3]
angles = BAT[offset + 4::3]
phase_torsions = BAT[offset + 5::3]
torsions = [(phase_torsions[n] + phase_torsions[self._firstTorsionTInd[n]]) \
if self._firstTorsionTInd[n]!=n else phase_torsions[n] \
for n in range(self.ntorsions)]
p1 = np.array([0., 0., 0.])
p2 = np.array([0., 0., BAT[offset]])
p3 = np.array([BAT[offset+1]*np.sin(BAT[offset+2]), 0., \
BAT[offset]-BAT[offset+1]*np.cos(BAT[offset+2])])
# If appropriate, rotate and translate the first three atoms
if offset == 6:
# Rotate the third atom by the appropriate value
(phi, theta, omega) = BAT[3:6]
co = np.cos(omega)
so = np.sin(omega)
Romega = np.array([[co, -so, 0], [so, co, 0], [0, 0, 1]])
p3 = Romega.dot(p3)
# Rotate the second two atoms to point in the right direction
cp = np.cos(phi)
sp = np.sin(phi)
ct = np.cos(theta)
st = np.sin(theta)
Re = np.array([[cp * ct, -sp, cp * st], [ct * sp, cp, sp * st],
[-st, 0, ct]])
p2 = Re.dot(p2)
p3 = Re.dot(p3)
# Translate the first three atoms by the origin
origin = np.array(BAT[:3])
p1 += origin
p2 += origin
p3 += origin
XYZ = np.zeros((self.natoms, 3))
XYZ[self.rootInd[0]] = p1
XYZ[self.rootInd[1]] = p2
XYZ[self.rootInd[2]] = p3
for ((a1,a2,a3,a4), bond, angle, torsion) in \
zip(self._torsionIndL,bonds,angles,torsions):
sphere = Sphere(Vector(XYZ[a2]), bond)
cone = Cone(Vector(XYZ[a2]), Vector(XYZ[a3] - XYZ[a2]), angle)
plane123 = Plane(Vector(XYZ[a4]), Vector(XYZ[a3]), Vector(XYZ[a2]))
points = sphere.intersectWith(cone).intersectWith(plane123)
p = points[0] if (Plane(Vector(XYZ[a3]), Vector(
XYZ[a2]), points[0]).normal * plane123.normal) > 0 else points[1]
p = rotatePoint(Vector(p),
Line(Vector(XYZ[a2]), Vector(XYZ[a2] - XYZ[a3])),
torsion)
XYZ[a1] = p.array
return XYZ
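# NOTE: the loop below is unreachable -- it follows the return statement above.
# It appears to be an alternative reconstruction of the same positions that
# avoids the geometry objects (Sphere/Cone/Plane) by applying a Rodrigues-style
# rotation directly, presumably kept for reference.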
for ((a1,a2,a3,a4), bond, angle, torsion) in \
zip(self._torsionIndL,bonds,angles,torsions):
p2 = XYZ[a2]
p3 = XYZ[a3]
p4 = XYZ[a4]
# circle = sphere.intersectWith(cone)
n23 = normalize(p3 - p2)
# points = circle.intersectWith(plane123)
# plane.intersectWith(Plane(circle.center, circle.normal)) is a line
# line_direction = cross(normalize(cross(p4-p3,n23)),n23)
# Rotate the point about the p2-p3 axis by the torsion angle
v21 = (bond * np.cos(angle)) * n23 - (bond * np.sin(angle)) * cross(
normalize(cross(p4 - p3, n23)), n23)
s = np.sin(torsion)
c = np.cos(torsion)
XYZ[a1] = p2 - cross(n23, v21) * s + np.sum(
n23 * v21) * n23 * (1.0 - c) + v21 * c
def showMolecule(self, colorBy=None, label=False, dcdFN=None):
"""
Opens the molecule in VMD | :param colorBy: color atoms by 'Occupancy', or 'Beta'. None uses default colors.
""" | random_line_split |
|
BAT.py | atom: atom.mass()
terminal_atoms = sorted(\
[a for a in self.molecule.atoms if len(a.bondedTo())==1], key=atom_name)
terminal_atoms = sorted(terminal_atoms, key=atom_mass)
if (initial_atom is None):
# Select the heaviest root atoms from the heaviest terminal atom
root = [terminal_atoms[-1]]
else:
if initial_atom in terminal_atoms:
root = [initial_atom]
else:
raise Exception('Initial atom is not a terminal atom')
self.initial_atom = initial_atom
attached_to_zero = sorted(root[0].bondedTo(), key=atom_name)
attached_to_zero = sorted(attached_to_zero, key=atom_mass)
root.append(attached_to_zero[-1])
attached_to_one = sorted([a for a in root[-1].bondedTo() \
if (a not in root) and (a not in terminal_atoms)], key=atom_name)
attached_to_one = sorted(attached_to_one, key=atom_mass)
root.append(attached_to_one[-1])
def _find_dihedral(selected):
"""
Finds a dihedral angle adjacent to the selected atoms that includes a new atom
:param selected: a list of atoms that have already been selected
:returns: a tuple of four atoms: the new atom followed by its three neighboring selected atoms
"""
atom_name = lambda atom: atom.fullName()
atom_mass = lambda atom: atom.mass()
# Loop over possible nearest neighbors
for a2 in selected:
# Find the new atom
attached_to_a2 = sorted([a for a in a2.bondedTo() \
if a not in selected], key=atom_name)
for a1 in sorted(attached_to_a2, key=atom_mass, reverse=True):
# Find the third atom
attached_to_a3 = sorted([a for a in a2.bondedTo() \
if (a in selected) and (a!=a1)], key=atom_name)
for a3 in sorted(attached_to_a3, key=atom_mass, reverse=True):
# Find the last atom
attached_to_a4 = sorted([a for a in a3.bondedTo() \
if (a in selected) and (a!=a2)], key=atom_name)
for a4 in sorted(attached_to_a4, key=atom_mass, reverse=True):
return (a1, a2, a3, a4)
print('Selected atoms:', selected)
raise Exception('No new dihedral angle found!')
# Construct a list of torsion angles
torsionL = []
selected = [a for a in root]
while len(selected) < self.universe.numberOfAtoms():
(a1, a2, a3, a4) = _find_dihedral(selected)
torsionL.append((a1, a2, a3, a4))
selected.append(a1)
# If _firstTorsionTInd is not equal to the list index,
# then the dihedrals will likely be correlated and it is more appropriate
# to use a relative phase angle
prior_atoms = [
sorted([a2.index, a3.index]) for (a1, a2, a3, a4) in torsionL
]
self.rootInd = [r.index for r in root]
self._torsionIndL = [[a.index for a in tset] for tset in torsionL]
self._firstTorsionTInd = [prior_atoms.index(prior_atoms[n]) \
for n in range(len(prior_atoms))]
self.ntorsions = self.natoms - 3
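# --- Editor's sketch (hedged, toy indices): how the prior_atoms lookup above
# --- yields _firstTorsionTInd, grouping torsions that share a central bond so
# --- later ones can be stored as phase angles relative to the first.
prior_atoms_toy = [[1, 2], [2, 3], [1, 2], [1, 2]]  # central-bond pairs per torsion
first_t_ind = [prior_atoms_toy.index(prior_atoms_toy[n]) for n in range(len(prior_atoms_toy))]
assert first_t_ind == [0, 1, 0, 0]  # torsions 2 and 3 become phases of torsion 0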
def getFirstTorsionInds(self, extended):
|
def BAT(self, XYZ, extended=False):
"""
Conversion from Cartesian to Bond-Angle-Torsion coordinates
:param extended: whether to include external coordinates or not
:param XYZ: Cartesian coordinates, an (natoms, 3) array
"""
root = [distance(XYZ[self.rootInd[0]],XYZ[self.rootInd[1]]),\
distance(XYZ[self.rootInd[1]],XYZ[self.rootInd[2]]),\
angle(XYZ[self.rootInd[0]],XYZ[self.rootInd[1]],XYZ[self.rootInd[2]])]
import itertools
internal = root + \
[val for val in itertools.chain.from_iterable([\
BAT4(XYZ[a1],XYZ[a2],XYZ[a3],XYZ[a4]) \
for (a1,a2,a3,a4) in self._torsionIndL])]
torsions = internal[5::3]
phase_torsions = [(torsions[n] - torsions[self._firstTorsionTInd[n]]) \
if self._firstTorsionTInd[n]!=n else torsions[n] \
for n in range(len(torsions))]
internal[5::3] = phase_torsions
if not extended:
return np.array(internal)
external = self.extended_coordinates(XYZ[self.rootInd[0]], \
XYZ[self.rootInd[1]], XYZ[self.rootInd[2]])
return np.array(list(external) + list(internal))
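# --- Editor's usage sketch (hedged): BAT() and Cartesian() are intended to be
# --- inverses, so a round-trip should reproduce the input. "converter" is a
# --- hypothetical instance of this class and XYZ an (natoms, 3) array.
# bat = converter.BAT(XYZ, extended=True)
# assert np.allclose(converter.Cartesian(bat), XYZ, atol=1e-6)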
def extended_coordinates(self, p1, p2, p3):
# The rotation axis is a normalized vector pointing from atom 0 to 1
# It is described in two degrees of freedom by the polar angle and azimuth
e = normalize(p2 - p1)
phi = np.arctan2(e[1], e[0]) # Azimuthal angle
theta = np.arccos(e[2]) # Polar angle
# Rotation to the z axis
cp = np.cos(phi)
sp = np.sin(phi)
ct = np.cos(theta)
st = np.sin(theta)
Rz = np.array([[cp * ct, ct * sp, -st], [-sp, cp, 0],
[cp * st, sp * st, ct]])
pos2 = Rz.dot(np.array(p3 - p2))
# Angle about the rotation axis
omega = np.arctan2(pos2[1], pos2[0])
return np.array(list(p1) + [phi, theta, omega])
def Cartesian(self, BAT):
"""
Conversion from (internal or extended) Bond-Angle-Torsion
to Cartesian coordinates
"""
# Arrange BAT coordinates in convenient arrays
offset = 6 if len(BAT) == (3 * self.natoms) else 0
bonds = BAT[offset + 3::3]
angles = BAT[offset + 4::3]
phase_torsions = BAT[offset + 5::3]
torsions = [(phase_torsions[n] + phase_torsions[self._firstTorsionTInd[n]]) \
if self._firstTorsionTInd[n]!=n else phase_torsions[n] \
for n in range(self.ntorsions)]
p1 = np.array([0., 0., 0.])
p2 = np.array([0., 0., BAT[offset]])
p3 = np.array([BAT[offset+1]*np.sin(BAT[offset+2]), 0., \
BAT[offset]-BAT[offset+1]*np.cos(BAT[offset+2])])
# If appropriate, rotate and translate the first three atoms
if offset == 6:
# Rotate the third atom about the z axis by omega
(phi, theta, omega) = BAT[3:6]
co = np.cos(omega)
so = np.sin(omega)
Romega = np.array([[co, -so, 0], [so, co, 0], [0, 0, 1]])
p3 = Romega.dot(p3)
# Rotate the second and third atoms to point in the right direction
cp = np.cos(phi)
sp = np.sin(phi)
ct = np.cos(theta)
st = np.sin(theta)
Re = np.array([[cp * ct, -sp, cp * st], [ct * sp, cp, sp * st],
[-st, 0, ct]])
p2 = Re.dot(p2)
p3 = Re.dot(p3)
# Translate the first three atoms to the specified origin
origin = np.array(BAT[:3])
p1 += origin
p2 += origin
p3 += origin
XYZ = np.zeros((self.natoms, 3))
XYZ[self.rootInd[0]] = p1
XYZ[self.rootInd[1]] = p2
XYZ[self.rootInd[2]] = p3
for ((a1,a2,a3,a4), bond, angle, torsion) in \
zip(self._torsionIndL,bonds,angles,torsions):
sphere = Sphere(Vector(XYZ[a2]), bond)
cone = Cone(Vector(XYZ[a2]), Vector(XYZ[a3] - XYZ[a2]), | """
Indices of the first torsions in the BAT array
"""
offset = 6 if extended else 0
torsionInds = np.array(range(offset + 5, self.natoms * 3, 3))
primaryTorsions = sorted(list(set(self._firstTorsionTInd)))
return list(torsionInds[primaryTorsions]) | identifier_body |
BAT.py |
attached_to_zero = sorted(root[0].bondedTo(), key=atom_name)
attached_to_zero = sorted(attached_to_zero, key=atom_mass)
root.append(attached_to_zero[-1])
attached_to_one = sorted([a for a in root[-1].bondedTo() \
if (a not in root) and (a not in terminal_atoms)], key=atom_name)
attached_to_one = sorted(attached_to_one, key=atom_mass)
root.append(attached_to_one[-1])
def _find_dihedral(selected):
"""
Finds a dihedral angle adjacent to the selected atoms that includes a new atom
:param selected: a list of atoms that have already been selected
:returns: a tuple of four atoms: the new atom followed by its three neighboring selected atoms
"""
atom_name = lambda atom: atom.fullName()
atom_mass = lambda atom: atom.mass()
# Loop over possible nearest neighbors
for a2 in selected:
# Find the new atom
attached_to_a2 = sorted([a for a in a2.bondedTo() \
if a not in selected], key=atom_name)
for a1 in sorted(attached_to_a2, key=atom_mass, reverse=True):
# Find the third atom
attached_to_a3 = sorted([a for a in a2.bondedTo() \
if (a in selected) and (a!=a1)], key=atom_name)
for a3 in sorted(attached_to_a3, key=atom_mass, reverse=True):
# Find the last atom
attached_to_a4 = sorted([a for a in a3.bondedTo() \
if (a in selected) and (a!=a2)], key=atom_name)
for a4 in sorted(attached_to_a4, key=atom_mass, reverse=True):
return (a1, a2, a3, a4)
print('Selected atoms:', selected)
raise Exception('No new dihedral angle found!')
# Construct a list of torsion angles
torsionL = []
selected = [a for a in root]
while len(selected) < self.universe.numberOfAtoms():
(a1, a2, a3, a4) = _find_dihedral(selected)
torsionL.append((a1, a2, a3, a4))
selected.append(a1)
# If _firstTorsionTInd is not equal to the list index,
# then the dihedrals will likely be correlated and it is more appropriate
# to use a relative phase angle
prior_atoms = [
sorted([a2.index, a3.index]) for (a1, a2, a3, a4) in torsionL
]
self.rootInd = [r.index for r in root]
self._torsionIndL = [[a.index for a in tset] for tset in torsionL]
self._firstTorsionTInd = [prior_atoms.index(prior_atoms[n]) \
for n in range(len(prior_atoms))]
self.ntorsions = self.natoms - 3
def getFirstTorsionInds(self, extended):
"""
Indices of the first torsions in the BAT array
"""
offset = 6 if extended else 0
torsionInds = np.array(range(offset + 5, self.natoms * 3, 3))
primaryTorsions = sorted(list(set(self._firstTorsionTInd)))
return list(torsionInds[primaryTorsions])
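# --- Editor's sketch (hedged, toy sizes): where torsions sit in an extended BAT
# --- array. For natoms=5, offset 6 plus every third slot from 5 gives positions
# --- 11 and 14 -- one per torsion (ntorsions = natoms - 3 = 2).
import numpy as np
natoms, offset = 5, 6
assert list(np.array(range(offset + 5, natoms * 3, 3))) == [11, 14]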
def BAT(self, XYZ, extended=False):
"""
Conversion from Cartesian to Bond-Angle-Torsion coordinates
:param extended: whether to include external coordinates or not
:param XYZ: Cartesian coordinates, an (natoms, 3) array
"""
root = [distance(XYZ[self.rootInd[0]],XYZ[self.rootInd[1]]),\
distance(XYZ[self.rootInd[1]],XYZ[self.rootInd[2]]),\
angle(XYZ[self.rootInd[0]],XYZ[self.rootInd[1]],XYZ[self.rootInd[2]])]
import itertools
internal = root + \
[val for val in itertools.chain.from_iterable([\
BAT4(XYZ[a1],XYZ[a2],XYZ[a3],XYZ[a4]) \
for (a1,a2,a3,a4) in self._torsionIndL])]
torsions = internal[5::3]
phase_torsions = [(torsions[n] - torsions[self._firstTorsionTInd[n]]) \
if self._firstTorsionTInd[n]!=n else torsions[n] \
for n in range(len(torsions))]
internal[5::3] = phase_torsions
if not extended:
return np.array(internal)
external = self.extended_coordinates(XYZ[self.rootInd[0]], \
XYZ[self.rootInd[1]], XYZ[self.rootInd[2]])
return np.array(list(external) + list(internal))
def extended_coordinates(self, p1, p2, p3):
# The rotation axis is a normalized vector pointing from atom 0 to 1
# It is described in two degrees of freedom by the polar angle and azimuth
e = normalize(p2 - p1)
phi = np.arctan2(e[1], e[0]) # Azimuthal angle
theta = np.arccos(e[2]) # Polar angle
# Rotation to the z axis
cp = np.cos(phi)
sp = np.sin(phi)
ct = np.cos(theta)
st = np.sin(theta)
Rz = np.array([[cp * ct, ct * sp, -st], [-sp, cp, 0],
[cp * st, sp * st, ct]])
pos2 = Rz.dot(np.array(p3 - p2))
# Angle about the rotation axis
omega = np.arctan2(pos2[1], pos2[0])
return np.array(list(p1) + [phi, theta, omega])
def Cartesian(self, BAT):
"""
Conversion from (internal or extended) Bond-Angle-Torsion
to Cartesian coordinates
"""
# Arrange BAT coordinates in convenient arrays
offset = 6 if len(BAT) == (3 * self.natoms) else 0
bonds = BAT[offset + 3::3]
angles = BAT[offset + 4::3]
phase_torsions = BAT[offset + 5::3]
torsions = [(phase_torsions[n] + phase_torsions[self._firstTorsionTInd[n]]) \
if self._firstTorsionTInd[n]!=n else phase_torsions[n] \
for n in range(self.ntorsions)]
p1 = np.array([0., 0., 0.])
p2 = np.array([0., 0., BAT[offset]])
p3 = np.array([BAT[offset+1]*np.sin(BAT[offset+2]), 0., \
BAT[offset]-BAT[offset+1]*np.cos(BAT[offset+2])])
# If appropriate, rotate and translate the first three atoms
if offset == 6:
# Rotate the third atom about the z axis by omega
(phi, theta, omega) = BAT[3:6]
co = np.cos(omega)
so = np.sin(omega)
Romega = np.array([[co, -so, 0], [so, co, 0], [0, 0, 1]])
p3 = Romega.dot(p3)
# Rotate the second and third atoms to point in the right direction
cp = np.cos(phi)
sp = np.sin(phi)
ct = np.cos(theta)
st = np.sin(theta)
Re = np.array([[cp * ct, -sp, cp * st], [ct * sp, cp, sp * st],
[-st, 0, ct]])
p2 = Re.dot(p2)
p3 = Re.dot(p3)
# Translate the first three atoms to the specified origin
origin = np.array(BAT[:3])
p1 += origin
p2 += origin
p3 += origin
XYZ = np.zeros((self.natoms, 3))
XYZ[self.rootInd[0]] = p1
XYZ[self.rootInd[1]] = p2
XYZ[self.rootInd[2]] = p3
for ((a1,a2,a3,a4), bond, angle, torsion) in \
zip(self._torsionIndL,bonds,angles,torsions):
| sphere = Sphere(Vector(XYZ[a2]), bond)
cone = Cone(Vector(XYZ[a2]), Vector(XYZ[a3] - XYZ[a2]), angle)
plane123 = Plane(Vector(XYZ[a4]), Vector(XYZ[a3]), Vector(XYZ[a2]))
points = sphere.intersectWith(cone).intersectWith(plane123)
p = points[0] if (Plane(Vector(XYZ[a3]), Vector(
XYZ[a2]), points[0]).normal * plane123.normal) > 0 else points[1]
p = rotatePoint(Vector(p),
Line(Vector(XYZ[a2]), Vector(XYZ[a2] - XYZ[a3])),
torsion)
XYZ[a1] = p.array | conditional_block |
|
BAT.py | atom: atom.mass()
terminal_atoms = sorted(\
[a for a in self.molecule.atoms if len(a.bondedTo())==1], key=atom_name)
terminal_atoms = sorted(terminal_atoms, key=atom_mass)
if (initial_atom is None):
# Start the root from the heaviest terminal atom
root = [terminal_atoms[-1]]
else:
if initial_atom in terminal_atoms:
root = [initial_atom]
else:
raise Exception('Initial atom is not a terminal atom')
self.initial_atom = initial_atom
attached_to_zero = sorted(root[0].bondedTo(), key=atom_name)
attached_to_zero = sorted(attached_to_zero, key=atom_mass)
root.append(attached_to_zero[-1])
attached_to_one = sorted([a for a in root[-1].bondedTo() \
if (a not in root) and (a not in terminal_atoms)], key=atom_name)
attached_to_one = sorted(attached_to_one, key=atom_mass)
root.append(attached_to_one[-1])
def _find_dihedral(selected):
"""
Finds a dihedral angle adjacent to the selected atoms that includes a new atom
:param selected: a list of atoms that have already been selected
:returns: a tuple of four atoms: the new atom followed by its three neighboring selected atoms
"""
atom_name = lambda atom: atom.fullName()
atom_mass = lambda atom: atom.mass()
# Loop over possible nearest neighbors
for a2 in selected:
# Find the new atom
attached_to_a2 = sorted([a for a in a2.bondedTo() \
if a not in selected], key=atom_name)
for a1 in sorted(attached_to_a2, key=atom_mass, reverse=True):
# Find the third atom
attached_to_a3 = sorted([a for a in a2.bondedTo() \
if (a in selected) and (a!=a1)], key=atom_name)
for a3 in sorted(attached_to_a3, key=atom_mass, reverse=True):
# Find the last atom
attached_to_a4 = sorted([a for a in a3.bondedTo() \
if (a in selected) and (a!=a2)], key=atom_name)
for a4 in sorted(attached_to_a4, key=atom_mass, reverse=True):
return (a1, a2, a3, a4)
print('Selected atoms:', selected)
raise Exception('No new dihedral angle found!')
# Construct a list of torsion angles
torsionL = []
selected = [a for a in root]
while len(selected) < self.universe.numberOfAtoms():
(a1, a2, a3, a4) = _find_dihedral(selected)
torsionL.append((a1, a2, a3, a4))
selected.append(a1)
# If _firstTorsionTInd is not equal to the list index,
# then the dihedrals will likely be correlated and it is more appropriate
# to use a relative phase angle
prior_atoms = [
sorted([a2.index, a3.index]) for (a1, a2, a3, a4) in torsionL
]
self.rootInd = [r.index for r in root]
self._torsionIndL = [[a.index for a in tset] for tset in torsionL]
self._firstTorsionTInd = [prior_atoms.index(prior_atoms[n]) \
for n in range(len(prior_atoms))]
self.ntorsions = self.natoms - 3
def | (self, extended):
"""
Indices of the first torsions in the BAT array
"""
offset = 6 if extended else 0
torsionInds = np.array(range(offset + 5, self.natoms * 3, 3))
primaryTorsions = sorted(list(set(self._firstTorsionTInd)))
return list(torsionInds[primaryTorsions])
def BAT(self, XYZ, extended=False):
"""
Conversion from Cartesian to Bond-Angle-Torsion coordinates
:param extended: whether to include external coordinates or not
:param XYZ: Cartesian coordinates, an (natoms, 3) array
"""
root = [distance(XYZ[self.rootInd[0]],XYZ[self.rootInd[1]]),\
distance(XYZ[self.rootInd[1]],XYZ[self.rootInd[2]]),\
angle(XYZ[self.rootInd[0]],XYZ[self.rootInd[1]],XYZ[self.rootInd[2]])]
import itertools
internal = root + \
[val for val in itertools.chain.from_iterable([\
BAT4(XYZ[a1],XYZ[a2],XYZ[a3],XYZ[a4]) \
for (a1,a2,a3,a4) in self._torsionIndL])]
torsions = internal[5::3]
phase_torsions = [(torsions[n] - torsions[self._firstTorsionTInd[n]]) \
if self._firstTorsionTInd[n]!=n else torsions[n] \
for n in range(len(torsions))]
internal[5::3] = phase_torsions
if not extended:
return np.array(internal)
external = self.extended_coordinates(XYZ[self.rootInd[0]], \
XYZ[self.rootInd[1]], XYZ[self.rootInd[2]])
return np.array(list(external) + list(internal))
def extended_coordinates(self, p1, p2, p3):
# The rotation axis is a normalized vector pointing from atom 0 to 1
# It is described in two degrees of freedom by the polar angle and azimuth
e = normalize(p2 - p1)
phi = np.arctan2(e[1], e[0]) # Polar angle
theta = np.arccos(e[2]) # Azimuthal angle
# Rotation to the z axis
cp = np.cos(phi)
sp = np.sin(phi)
ct = np.cos(theta)
st = np.sin(theta)
Rz = np.array([[cp * ct, ct * sp, -st], [-sp, cp, 0],
[cp * st, sp * st, ct]])
pos2 = Rz.dot(np.array(p3 - p2))
# Angle about the rotation axis
omega = np.arctan2(pos2[1], pos2[0])
return np.array(list(p1) + [phi, theta, omega])
def Cartesian(self, BAT):
"""
Conversion from (internal or extended) Bond-Angle-Torsion
to Cartesian coordinates
"""
# Arrange BAT coordinates in convenient arrays
offset = 6 if len(BAT) == (3 * self.natoms) else 0
bonds = BAT[offset + 3::3]
angles = BAT[offset + 4::3]
phase_torsions = BAT[offset + 5::3]
torsions = [(phase_torsions[n] + phase_torsions[self._firstTorsionTInd[n]]) \
if self._firstTorsionTInd[n]!=n else phase_torsions[n] \
for n in range(self.ntorsions)]
p1 = np.array([0., 0., 0.])
p2 = np.array([0., 0., BAT[offset]])
p3 = np.array([BAT[offset+1]*np.sin(BAT[offset+2]), 0., \
BAT[offset]-BAT[offset+1]*np.cos(BAT[offset+2])])
# If appropriate, rotate and translate the first three atoms
if offset == 6:
# Rotate the third atom about the z axis by omega
(phi, theta, omega) = BAT[3:6]
co = np.cos(omega)
so = np.sin(omega)
Romega = np.array([[co, -so, 0], [so, co, 0], [0, 0, 1]])
p3 = Romega.dot(p3)
# Rotate the second and third atoms to point in the right direction
cp = np.cos(phi)
sp = np.sin(phi)
ct = np.cos(theta)
st = np.sin(theta)
Re = np.array([[cp * ct, -sp, cp * st], [ct * sp, cp, sp * st],
[-st, 0, ct]])
p2 = Re.dot(p2)
p3 = Re.dot(p3)
# Translate the first three atoms to the specified origin
origin = np.array(BAT[:3])
p1 += origin
p2 += origin
p3 += origin
XYZ = np.zeros((self.natoms, 3))
XYZ[self.rootInd[0]] = p1
XYZ[self.rootInd[1]] = p2
XYZ[self.rootInd[2]] = p3
for ((a1,a2,a3,a4), bond, angle, torsion) in \
zip(self._torsionIndL,bonds,angles,torsions):
sphere = Sphere(Vector(XYZ[a2]), bond)
cone = Cone(Vector(XYZ[a2]), Vector(XYZ[a3] - XYZ[a2]), | getFirstTorsionInds | identifier_name |
gen_mike_input_rf_linux.py | ir_if_not_exist_given_filepath(filename):
if not os.path.exists(os.path.dirname(filename)):
try:
os.makedirs(os.path.dirname(filename))
except OSError as exc: # Guard against race condition
pass
def read_attribute_from_config_file(attribute, config, compulsory=False):
"""
:param attribute: key name of the config json file
:param config: loaded json file
:param compulsory: Boolean value: whether the attribute must be present in the config file
:return:
"""
if attribute in config and (config[attribute] != ""):
return config[attribute]
elif compulsory:
print("{} not specified in config file.".format(attribute))
exit(1)
else:
# print("{} not specified in config file.".format(attribute))
return None
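# --- Editor's usage sketch (hedged, toy config; not the real rain_config.json):
# config = {"output_dir": "/tmp/mike", "output_file_name": ""}
# read_attribute_from_config_file('output_dir', config)        # -> "/tmp/mike"
# read_attribute_from_config_file('output_file_name', config)  # -> None (empty value)
# read_attribute_from_config_file('missing', config, True)     # prints an error, exit(1)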
def check_time_format(time):
try:
time = datetime.strptime(time, DATE_TIME_FORMAT)
if time.strftime('%S') != '00':
print("Seconds should be always 00")
exit(1)
if time.strftime('%M') not in ('00', '15', '30', '45'):
print("Minutes should be always multiple of 15")
exit(1)
return True
except Exception:
print("Time {} is not in proper format".format(time))
exit(1)
def list_of_lists_to_df_first_row_as_columns(data):
"""
:param data: data in list of lists format
:return: equivalent pandas dataframe
"""
return pd.DataFrame.from_records(data[1:], columns=data[0])
def replace_negative_numbers_with_nan(df):
num = df._get_numeric_data()
num[num < 0] = np.nan
return df
def replace_nan_with_row_average(df):
m = df.mean(axis=1)
for i, col in enumerate(df):
df.iloc[:, i] = df.iloc[:, i].fillna(m)
return df
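# --- Editor's demo (hedged, toy frame): negatives are treated as bad sensor
# --- readings, then each gap is filled with that row's mean across stations.
import pandas as pd
toy = pd.DataFrame({'a': [1.0, -9999.0], 'b': [3.0, 4.0]})
toy = replace_nan_with_row_average(replace_negative_numbers_with_nan(toy))
assert toy.loc[1, 'a'] == 4.0  # row mean of [NaN, 4.0]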
def get_all_obs_rain_hashids_from_curw_sim(pool):
obs_id_hash_id_mappings = {}
expected_earliest_obs_end = (datetime.now() - timedelta(days=1)).strftime(COMMON_DATE_TIME_FORMAT)
connection = pool.connection()
try:
with connection.cursor() as cursor:
sql_statement = "SELECT `id`, `grid_id` FROM `run` where `model`=%s and `obs_end`>=%s;"
row_count = cursor.execute(sql_statement, ("hechms", expected_earliest_obs_end))
if row_count > 0:
results = cursor.fetchall()
for row in results:
grid_id = row.get("grid_id")
grid_id_parts = grid_id.split("_")
obs_id_hash_id_mappings[grid_id_parts[1]] = row.get("id")
return obs_id_hash_id_mappings
else:
return None
except Exception as exception:
traceback.print_exc()
finally:
if connection is not None:
connection.close()
def prepare_mike_rf_input(start, end, coefficients):
pool = None
try:
#### process station based hybrid timeseries ####
distinct_obs_ids = coefficients.curw_obs_id.unique()
hybrid_ts_df = pd.DataFrame()
hybrid_ts_df['time'] = pd.date_range(start=start, end=end, freq='5min')
pool = get_Pool(host=con_params.CURW_SIM_HOST, port=con_params.CURW_SIM_PORT, user=con_params.CURW_SIM_USERNAME,
password=con_params.CURW_SIM_PASSWORD,
db=con_params.CURW_SIM_DATABASE)
TS = Timeseries(pool)
obs_id_hash_id_mapping = get_all_obs_rain_hashids_from_curw_sim(pool)
for obs_id in distinct_obs_ids:
# taking data from curw_sim database (data prepared based on active stations for hechms)
ts = TS.get_timeseries(id_=obs_id_hash_id_mapping.get(str(obs_id)), start_date=start, end_date=end)
ts.insert(0, ['time', obs_id])
ts_df = list_of_lists_to_df_first_row_as_columns(ts)
ts_df[obs_id] = ts_df[obs_id].astype('float64')
hybrid_ts_df = pd.merge(hybrid_ts_df, ts_df, how="left", on='time')
hybrid_ts_df.set_index('time', inplace=True)
hybrid_ts_df = hybrid_ts_df.resample('15min', label='right', closed='right').sum()
# pd.set_option('display.max_rows', hybrid_ts_df.shape[0]+1)
# pd.set_option('display.max_columns', hybrid_ts_df.shape[1]+1)
# print(hybrid_ts_df)
hybrid_ts_df = replace_negative_numbers_with_nan(hybrid_ts_df)
# print(hybrid_ts_df)
hybrid_ts_df = replace_nan_with_row_average(hybrid_ts_df)
# print(hybrid_ts_df)
#### process mike input ####
distinct_names = coefficients.name.unique()
mike_input = pd.DataFrame()
mike_input_initialized = False
for name in distinct_names:
catchment_coefficients = coefficients[coefficients.name == name]
# print(catchment_coefficients)
catchment = pd.DataFrame()
catchment_initialized = False
for index, row in catchment_coefficients.iterrows():
# print(index, row['curw_obs_id'], row['coefficient'])
if not catchment_initialized:
catchment = (hybrid_ts_df[row['curw_obs_id']] * row['coefficient']).to_frame(name=row['curw_obs_id'])
catchment_initialized = True
else:
new = (hybrid_ts_df[row['curw_obs_id']] * row['coefficient']).to_frame(name=row['curw_obs_id'])
catchment = pd.merge(catchment, new, how="left", on='time')
if not mike_input_initialized:
mike_input[name] = catchment.sum(axis=1)
mike_input_initialized = True
else:
mike_input = pd.merge(mike_input, (catchment.sum(axis=1)).to_frame(name=name), how="left", on='time')
mike_input = mike_input.round(1)
return mike_input
except Exception:
traceback.print_exc()
finally:
if pool is not None:
destroy_Pool(pool)
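# --- Editor's sketch (hedged, fake ids and weights): the core of the loop above --
# --- each catchment series is a coefficient-weighted sum of its station series.
station_rain = {'100': 1.0, '101': 3.0}    # mm per timestep
coeffs = [('100', 0.25), ('101', 0.75)]    # (curw_obs_id, coefficient)
assert sum(station_rain[sid] * w for sid, w in coeffs) == 2.5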
def usage():
usageText = """
Usage: ./inputs/gen_mike_input_rf_linux.py [-s "YYYY-MM-DD HH:MM:SS"] [-e "YYYY-MM-DD HH:MM:SS"]
-h --help Show usage
-s --start_time Mike rainfall timeseries start time (e.g: "2019-06-05 00:00:00"). Default is 00:00:00, 3 days before today.
-e --end_time Mike rainfall timeseries end time (e.g: "2019-06-05 23:00:00"). Default is 00:00:00, 2 days after today.
"""
print(usageText)
if __name__ == "__main__":
set_db_config_file_path(os.path.join(ROOT_DIRECTORY, 'db_adapter_config.json'))
try:
start_time = None
end_time = None
try:
opts, args = getopt.getopt(sys.argv[1:], "h:s:e:",
["help", "start_time=", "end_time="])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit()
elif opt in ("-s", "--start_time"):
start_time = arg.strip()
elif opt in ("-e", "--end_time"):
end_time = arg.strip()
# Load config params
config = json.loads(open(os.path.join('inputs', 'configs', 'rain_config.json')).read())
output_dir = read_attribute_from_config_file('output_dir', config)
file_name = read_attribute_from_config_file('output_file_name', config)
if start_time is None:
start_time = (datetime.now() - timedelta(days=3)).strftime('%Y-%m-%d 00:00:00')
else:
check_time_format(time=start_time)
if end_time is None:
end_time = (datetime.now() + timedelta(days=2)).strftime('%Y-%m-%d 00:00:00')
else:
check_time_format(time=end_time)
if output_dir is None:
output_dir = os.path.join(OUTPUT_DIRECTORY, (datetime.utcnow() + timedelta(hours=5, minutes=30)).strftime('%Y-%m-%d_%H-00-00'))
if file_name is None:
file_name = 'mike_rf.txt'
mike_rf_file_path = os.path.join(output_dir, file_name)
if not os.path.isfile(mike_rf_file_path):
makedir_if_not_exist_given_filepath(mike_rf_file_path)
print("{} start preparing mike rainfall input".format(datetime.now()))
coefficients = pd.read_csv(os.path.join('inputs', 'params', 'sb_rf_coefficients.csv'), delimiter=',')
mike_rainfall = prepare_mike_rf_input(start=start_time, end=end_time, coefficients=coefficients)
mike_rainfall.to_csv(mike_rf_file_path, header=True, index=True) | print("{} completed preparing mike rainfall input".format(datetime.now()))
print("Mike input rainfall file is available at {}".format(mike_rf_file_path)) | random_line_split |
|
gen_mike_input_rf_linux.py | import connection as con_params
from db_adapter.base import get_Pool, destroy_Pool
from db_adapter.constants import CURW_SIM_DATABASE, CURW_SIM_PASSWORD, CURW_SIM_USERNAME, CURW_SIM_PORT, CURW_SIM_HOST
from db_adapter.curw_sim.timeseries import Timeseries
from db_adapter.constants import COMMON_DATE_TIME_FORMAT
ROOT_DIRECTORY = '/home/uwcc-admin/curw_mike_data_handler'
# ROOT_DIRECTORY = 'D:\curw_mike_data_handlers'
OUTPUT_DIRECTORY = "/mnt/disks/wrf_nfs/mike/inputs"
def write_to_file(file_name, data):
with open(file_name, 'w+') as f:
f.write('\n'.join(data))
def append_to_file(file_name, data):
with open(file_name, 'a+') as f:
f.write('\n'.join(data))
def append_file_to_file(file_name, file_content):
with open(file_name, 'a+') as f:
f.write('\n')
f.write(file_content)
def makedir_if_not_exist_given_filepath(filename):
if not os.path.exists(os.path.dirname(filename)):
try:
os.makedirs(os.path.dirname(filename))
except OSError as exc: # Guard against race condition
pass
def read_attribute_from_config_file(attribute, config, compulsory=False):
"""
:param attribute: key name of the config json file
:param config: loaded json file
:param compulsory: Boolean value: whether the attribute must be present in the config file
:return:
"""
if attribute in config and (config[attribute] != ""):
return config[attribute]
elif compulsory:
print("{} not specified in config file.".format(attribute))
exit(1)
else:
# print("{} not specified in config file.".format(attribute))
return None
def check_time_format(time):
try:
time = datetime.strptime(time, DATE_TIME_FORMAT)
if time.strftime('%S') != '00':
print("Seconds should be always 00")
exit(1)
if time.strftime('%M') not in ('00', '15', '30', '45'):
print("Minutes should be always multiple of 15")
exit(1)
return True
except Exception:
print("Time {} is not in proper format".format(time))
exit(1)
def list_of_lists_to_df_first_row_as_columns(data):
"""
:param data: data in list of lists format
:return: equivalent pandas dataframe
"""
return pd.DataFrame.from_records(data[1:], columns=data[0])
def replace_negative_numbers_with_nan(df):
num = df._get_numeric_data()
num[num < 0] = np.nan
return df
def replace_nan_with_row_average(df):
m = df.mean(axis=1)
for i, col in enumerate(df):
df.iloc[:, i] = df.iloc[:, i].fillna(m)
return df
def get_all_obs_rain_hashids_from_curw_sim(pool):
obs_id_hash_id_mappings = {}
expected_earliest_obs_end = (datetime.now() - timedelta(days=1)).strftime(COMMON_DATE_TIME_FORMAT)
connection = pool.connection()
try:
with connection.cursor() as cursor:
sql_statement = "SELECT `id`, `grid_id` FROM `run` where `model`=%s and `obs_end`>=%s;"
row_count = cursor.execute(sql_statement, ("hechms", expected_earliest_obs_end))
if row_count > 0:
results = cursor.fetchall()
for dict in results:
grid_id = dict.get("grid_id")
grid_id_parts = grid_id.split("_")
obs_id_hash_id_mappings[grid_id_parts[1]] = dict.get("id")
return obs_id_hash_id_mappings
else:
return None
except Exception as exception:
traceback.print_exc()
finally:
if connection is not None:
connection.close()
def prepare_mike_rf_input(start, end, coefficients):
pool = None
try:
#### process station based hybrid timeseries ####
distinct_obs_ids = coefficients.curw_obs_id.unique()
hybrid_ts_df = pd.DataFrame()
hybrid_ts_df['time'] = pd.date_range(start=start, end=end, freq='5min')
pool = get_Pool(host=con_params.CURW_SIM_HOST, port=con_params.CURW_SIM_PORT, user=con_params.CURW_SIM_USERNAME,
password=con_params.CURW_SIM_PASSWORD,
db=con_params.CURW_SIM_DATABASE)
TS = Timeseries(pool)
obs_id_hash_id_mapping = get_all_obs_rain_hashids_from_curw_sim(pool)
for obs_id in distinct_obs_ids:
# taking data from curw_sim database (data prepared based on active stations for hechms)
ts = TS.get_timeseries(id_=obs_id_hash_id_mapping.get(str(obs_id)), start_date=start, end_date=end)
ts.insert(0, ['time', obs_id])
ts_df = list_of_lists_to_df_first_row_as_columns(ts)
ts_df[obs_id] = ts_df[obs_id].astype('float64')
hybrid_ts_df = pd.merge(hybrid_ts_df, ts_df, how="left", on='time')
hybrid_ts_df.set_index('time', inplace=True)
hybrid_ts_df = hybrid_ts_df.resample('15min', label='right', closed='right').sum()
# pd.set_option('display.max_rows', hybrid_ts_df.shape[0]+1)
# pd.set_option('display.max_columns', hybrid_ts_df.shape[1]+1)
# print(hybrid_ts_df)
hybrid_ts_df = replace_negative_numbers_with_nan(hybrid_ts_df)
# print(hybrid_ts_df)
hybrid_ts_df = replace_nan_with_row_average(hybrid_ts_df)
# print(hybrid_ts_df)
#### process mike input ####
distinct_names = coefficients.name.unique()
mike_input = pd.DataFrame()
mike_input_initialized = False
for name in distinct_names:
catchment_coefficients = coefficients[coefficients.name == name]
# print(catchment_coefficients)
catchment = pd.DataFrame()
catchment_initialized = False
for index, row in catchment_coefficients.iterrows():
# print(index, row['curw_obs_id'], row['coefficient'])
if not catchment_initialized:
catchment = (hybrid_ts_df[row['curw_obs_id']] * row['coefficient']).to_frame(name=row['curw_obs_id'])
catchment_initialized = True
else:
new = (hybrid_ts_df[row['curw_obs_id']] * row['coefficient']).to_frame(name=row['curw_obs_id'])
catchment = pd.merge(catchment, new, how="left", on='time')
if not mike_input_initialized:
mike_input[name] = catchment.sum(axis=1)
mike_input_initialized = True
else:
mike_input = pd.merge(mike_input, (catchment.sum(axis=1)).to_frame(name=name), how="left", on='time')
mike_input = mike_input.round(1)
return mike_input
except Exception:
traceback.print_exc()
finally:
if pool is not None:
destroy_Pool(pool)
def usage():
|
if __name__ == "__main__":
set_db_config_file_path(os.path.join(ROOT_DIRECTORY, 'db_adapter_config.json'))
try:
start_time = None
end_time = None
try:
opts, args = getopt.getopt(sys.argv[1:], "h:s:e:",
["help", "start_time=", "end_time="])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit()
elif opt in ("-s", "--start_time"):
start_time = arg.strip()
elif opt in ("-e", "--end_time"):
end_time = arg.strip()
# Load config params
config = json.loads(open(os.path.join('inputs', 'configs', 'rain_config.json')).read())
output_dir = read_attribute_from_config_file('output_dir', config)
file_name = read_attribute_from_config_file('output_file_name', config)
if start_time is None:
start_time = (datetime.now() - timedelta(days=3)).strftime('%Y-%m-%d 00:00:00')
else:
check_time_format(time=start_time)
if end_time is None:
end_time = (datetime.now() + timedelta(days=2)).strftime('%Y-%m-%d 00:00:00')
else:
check_time_format(time=end_time)
if output_dir is None:
output_dir | usageText = """
Usage: ./inputs/gen_mike_input_rf_linux.py [-s "YYYY-MM-DD HH:MM:SS"] [-e "YYYY-MM-DD HH:MM:SS"]
-h --help Show usage
-s --start_time Mike rainfall timeseries start time (e.g: "2019-06-05 00:00:00"). Default is 00:00:00, 3 days before today.
-e --end_time Mike rainfall timeseries end time (e.g: "2019-06-05 23:00:00"). Default is 00:00:00, 2 days after.
"""
print(usageText) | identifier_body |
gen_mike_input_rf_linux.py | import connection as con_params
from db_adapter.base import get_Pool, destroy_Pool
from db_adapter.constants import CURW_SIM_DATABASE, CURW_SIM_PASSWORD, CURW_SIM_USERNAME, CURW_SIM_PORT, CURW_SIM_HOST
from db_adapter.curw_sim.timeseries import Timeseries
from db_adapter.constants import COMMON_DATE_TIME_FORMAT
ROOT_DIRECTORY = '/home/uwcc-admin/curw_mike_data_handler'
# ROOT_DIRECTORY = 'D:\curw_mike_data_handlers'
OUTPUT_DIRECTORY = "/mnt/disks/wrf_nfs/mike/inputs"
def write_to_file(file_name, data):
with open(file_name, 'w+') as f:
f.write('\n'.join(data))
def append_to_file(file_name, data):
with open(file_name, 'a+') as f:
f.write('\n'.join(data))
def append_file_to_file(file_name, file_content):
with open(file_name, 'a+') as f:
f.write('\n')
f.write(file_content)
def makedir_if_not_exist_given_filepath(filename):
if not os.path.exists(os.path.dirname(filename)):
try:
os.makedirs(os.path.dirname(filename))
except OSError as exc: # Guard against race condition
pass
def read_attribute_from_config_file(attribute, config, compulsory=False):
"""
:param attribute: key name of the config json file
:param config: loaded json file
:param compulsory: Boolean value: whether the attribute must be present in the config file
:return:
"""
if attribute in config and (config[attribute] != ""):
return config[attribute]
elif compulsory:
print("{} not specified in config file.".format(attribute))
exit(1)
else:
# print("{} not specified in config file.".format(attribute))
return None
def check_time_format(time):
try:
time = datetime.strptime(time, DATE_TIME_FORMAT)
if time.strftime('%S') != '00':
print("Seconds should be always 00")
exit(1)
if time.strftime('%M') not in ('00', '15', '30', '45'):
print("Minutes should be always multiple of 15")
exit(1)
return True
except Exception:
print("Time {} is not in proper format".format(time))
exit(1)
def list_of_lists_to_df_first_row_as_columns(data):
"""
:param data: data in list of lists format
:return: equivalent pandas dataframe
"""
return pd.DataFrame.from_records(data[1:], columns=data[0])
def replace_negative_numbers_with_nan(df):
num = df._get_numeric_data()
num[num < 0] = np.nan
return df
def replace_nan_with_row_average(df):
m = df.mean(axis=1)
for i, col in enumerate(df):
df.iloc[:, i] = df.iloc[:, i].fillna(m)
return df
def get_all_obs_rain_hashids_from_curw_sim(pool):
obs_id_hash_id_mappings = {}
expected_earliest_obs_end = (datetime.now() - timedelta(days=1)).strftime(COMMON_DATE_TIME_FORMAT)
connection = pool.connection()
try:
with connection.cursor() as cursor:
sql_statement = "SELECT `id`, `grid_id` FROM `run` where `model`=%s and `obs_end`>=%s;"
row_count = cursor.execute(sql_statement, ("hechms", expected_earliest_obs_end))
if row_count > 0:
results = cursor.fetchall()
for row in results:
grid_id = row.get("grid_id")
grid_id_parts = grid_id.split("_")
obs_id_hash_id_mappings[grid_id_parts[1]] = row.get("id")
return obs_id_hash_id_mappings
else:
return None
except Exception as exception:
traceback.print_exc()
finally:
if connection is not None:
connection.close()
def prepare_mike_rf_input(start, end, coefficients):
pool = None
try:
#### process station based hybrid timeseries ####
distinct_obs_ids = coefficients.curw_obs_id.unique()
hybrid_ts_df = pd.DataFrame()
hybrid_ts_df['time'] = pd.date_range(start=start, end=end, freq='5min')
pool = get_Pool(host=con_params.CURW_SIM_HOST, port=con_params.CURW_SIM_PORT, user=con_params.CURW_SIM_USERNAME,
password=con_params.CURW_SIM_PASSWORD,
db=con_params.CURW_SIM_DATABASE)
TS = Timeseries(pool)
obs_id_hash_id_mapping = get_all_obs_rain_hashids_from_curw_sim(pool)
for obs_id in distinct_obs_ids:
# taking data from curw_sim database (data prepared based on active stations for hechms)
ts = TS.get_timeseries(id_=obs_id_hash_id_mapping.get(str(obs_id)), start_date=start, end_date=end)
ts.insert(0, ['time', obs_id])
ts_df = list_of_lists_to_df_first_row_as_columns(ts)
ts_df[obs_id] = ts_df[obs_id].astype('float64')
hybrid_ts_df = pd.merge(hybrid_ts_df, ts_df, how="left", on='time')
hybrid_ts_df.set_index('time', inplace=True)
hybrid_ts_df = hybrid_ts_df.resample('15min', label='right', closed='right').sum()
# pd.set_option('display.max_rows', hybrid_ts_df.shape[0]+1)
# pd.set_option('display.max_columns', hybrid_ts_df.shape[1]+1)
# print(hybrid_ts_df)
hybrid_ts_df = replace_negative_numbers_with_nan(hybrid_ts_df)
# print(hybrid_ts_df)
hybrid_ts_df = replace_nan_with_row_average(hybrid_ts_df)
# print(hybrid_ts_df)
#### process mike input ####
distinct_names = coefficients.name.unique()
mike_input = pd.DataFrame()
mike_input_initialized = False
for name in distinct_names:
catchment_coefficients = coefficients[coefficients.name == name]
# print(catchment_coefficients)
catchment = pd.DataFrame()
catchment_initialized = False
for index, row in catchment_coefficients.iterrows():
# print(index, row['curw_obs_id'], row['coefficient'])
if not catchment_initialized:
catchment = (hybrid_ts_df[row['curw_obs_id']] * row['coefficient']).to_frame(name=row['curw_obs_id'])
catchment_initialized = True
else:
new = (hybrid_ts_df[row['curw_obs_id']] * row['coefficient']).to_frame(name=row['curw_obs_id'])
catchment = pd.merge(catchment, new, how="left", on='time')
if not mike_input_initialized:
mike_input[name] = catchment.sum(axis=1)
mike_input_initialized = True
else:
mike_input = pd.merge(mike_input, (catchment.sum(axis=1)).to_frame(name=name), how="left", on='time')
mike_input = mike_input.round(1)
return mike_input
except Exception:
traceback.print_exc()
finally:
if pool is not None:
destroy_Pool(pool)
def usage():
usageText = """
Usage: ./inputs/gen_mike_input_rf_linux.py [-s "YYYY-MM-DD HH:MM:SS"] [-e "YYYY-MM-DD HH:MM:SS"]
-h --help Show usage
-s --start_time Mike rainfall timeseries start time (e.g: "2019-06-05 00:00:00"). Default is 00:00:00, 3 days before today.
-e --end_time Mike rainfall timeseries end time (e.g: "2019-06-05 23:00:00"). Default is 00:00:00, 2 days after today.
"""
print(usageText)
if __name__ == "__main__":
set_db_config_file_path(os.path.join(ROOT_DIRECTORY, 'db_adapter_config.json'))
try:
start_time = None
end_time = None
try:
opts, args = getopt.getopt(sys.argv[1:], "h:s:e:",
["help", "start_time=", "end_time="])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit()
elif opt in ("-s", "--start_time"):
start_time = arg.strip()
elif opt in ("-e", "--end_time"):
end_time = arg.strip()
# Load config params
config = json.loads(open(os.path.join('inputs', 'configs', 'rain_config.json')).read())
output_dir = read_attribute_from_config_file('output_dir', config)
file_name = read_attribute_from_config_file('output_file_name', config)
if start_time is None:
start_time = (datetime.now() - timedelta(days=3)).strftime('%Y-%m-%d 00:00:00')
else:
check_time_format(time=start_time)
if end_time is None:
|
else:
check_time_format(time=end_time)
if output_dir is None:
output_dir | end_time = (datetime.now() + timedelta(days=2)).strftime('%Y-%m-%d 00:00:00') | conditional_block |