body | body_hash | docstring | path | name | repository_name | lang | body_without_docstring
---|---|---|---|---|---|---|---|
def draw_scale(self, track):
' draw_scale(self, track) -> ([element, element,...], [element, element,...])\n\n o track Track object\n\n Returns a tuple of (list of elements in the scale, list of labels\n in the scale)\n '
scale_elements = []
scale_labels = []
if (not track.scale):
return ([], [])
(btm, ctr, top) = self.track_radii[self.current_track_level]
trackheight = (top - ctr)
if (self.sweep < 1):
p = ArcPath(strokeColor=track.scale_color, fillColor=None)
p.addArc(self.xcenter, self.ycenter, ctr, startangledegrees=(90 - (360 * self.sweep)), endangledegrees=90)
scale_elements.append(p)
del p
else:
scale_elements.append(Circle(self.xcenter, self.ycenter, ctr, strokeColor=track.scale_color, fillColor=None))
if track.scale_ticks:
ticklen = (track.scale_largeticks * trackheight)
tickiterval = int(track.scale_largetick_interval)
largeticks = [pos for pos in range((tickiterval * (self.start // tickiterval)), int(self.end), tickiterval) if (pos >= self.start)]
for tickpos in largeticks:
(tick, label) = self.draw_tick(tickpos, ctr, ticklen, track, track.scale_largetick_labels)
scale_elements.append(tick)
if (label is not None):
scale_labels.append(label)
ticklen = (track.scale_smallticks * trackheight)
tickiterval = int(track.scale_smalltick_interval)
smallticks = [pos for pos in range((tickiterval * (self.start // tickiterval)), int(self.end), tickiterval) if (pos >= self.start)]
for tickpos in smallticks:
(tick, label) = self.draw_tick(tickpos, ctr, ticklen, track, track.scale_smalltick_labels)
scale_elements.append(tick)
if (label is not None):
scale_labels.append(label)
if track.axis_labels:
for set in track.get_sets():
if (set.__class__ is GraphSet):
for n in range(7):
angle = (n * 1.0471975511965976)
(ticksin, tickcos) = (sin(angle), cos(angle))
(x0, y0) = ((self.xcenter + (btm * ticksin)), (self.ycenter + (btm * tickcos)))
(x1, y1) = ((self.xcenter + (top * ticksin)), (self.ycenter + (top * tickcos)))
scale_elements.append(Line(x0, y0, x1, y1, strokeColor=track.scale_color))
graph_label_min = []
graph_label_max = []
graph_label_mid = []
for graph in set.get_graphs():
quartiles = graph.quartiles()
(minval, maxval) = (quartiles[0], quartiles[4])
if (graph.center is None):
midval = ((maxval + minval) / 2.0)
graph_label_min.append(('%.3f' % minval))
graph_label_max.append(('%.3f' % maxval))
graph_label_mid.append(('%.3f' % midval))
else:
diff = max((graph.center - minval), (maxval - graph.center))
minval = (graph.center - diff)
maxval = (graph.center + diff)
midval = graph.center
graph_label_mid.append(('%.3f' % midval))
graph_label_min.append(('%.3f' % minval))
graph_label_max.append(('%.3f' % maxval))
(xmid, ymid) = (((x0 + x1) / 2.0), ((y0 + y1) / 2.0))
for (limit, x, y) in [(graph_label_min, x0, y0), (graph_label_max, x1, y1), (graph_label_mid, xmid, ymid)]:
label = String(0, 0, ';'.join(limit), fontName=track.scale_font, fontSize=track.scale_fontsize, fillColor=track.scale_color)
label.textAnchor = 'middle'
labelgroup = Group(label)
labelgroup.transform = (tickcos, (- ticksin), ticksin, tickcos, x, y)
scale_labels.append(labelgroup)
return (scale_elements, scale_labels) | -2,804,434,453,092,241,400 | draw_scale(self, track) -> ([element, element,...], [element, element,...])
o track Track object
Returns a tuple of (list of elements in the scale, list of labels
in the scale) | bin/last_wrapper/Bio/Graphics/GenomeDiagram/_CircularDrawer.py | draw_scale | LyonsLab/coge | python | def draw_scale(self, track):
' draw_scale(self, track) -> ([element, element,...], [element, element,...])\n\n o track Track object\n\n Returns a tuple of (list of elements in the scale, list of labels\n in the scale)\n '
scale_elements = []
scale_labels = []
if (not track.scale):
return ([], [])
(btm, ctr, top) = self.track_radii[self.current_track_level]
trackheight = (top - ctr)
if (self.sweep < 1):
p = ArcPath(strokeColor=track.scale_color, fillColor=None)
p.addArc(self.xcenter, self.ycenter, ctr, startangledegrees=(90 - (360 * self.sweep)), endangledegrees=90)
scale_elements.append(p)
del p
else:
scale_elements.append(Circle(self.xcenter, self.ycenter, ctr, strokeColor=track.scale_color, fillColor=None))
if track.scale_ticks:
ticklen = (track.scale_largeticks * trackheight)
tickiterval = int(track.scale_largetick_interval)
largeticks = [pos for pos in range((tickiterval * (self.start // tickiterval)), int(self.end), tickiterval) if (pos >= self.start)]
for tickpos in largeticks:
(tick, label) = self.draw_tick(tickpos, ctr, ticklen, track, track.scale_largetick_labels)
scale_elements.append(tick)
if (label is not None):
scale_labels.append(label)
ticklen = (track.scale_smallticks * trackheight)
tickiterval = int(track.scale_smalltick_interval)
smallticks = [pos for pos in range((tickiterval * (self.start // tickiterval)), int(self.end), tickiterval) if (pos >= self.start)]
for tickpos in smallticks:
(tick, label) = self.draw_tick(tickpos, ctr, ticklen, track, track.scale_smalltick_labels)
scale_elements.append(tick)
if (label is not None):
scale_labels.append(label)
if track.axis_labels:
for set in track.get_sets():
if (set.__class__ is GraphSet):
for n in range(7):
angle = (n * 1.0471975511965976)
(ticksin, tickcos) = (sin(angle), cos(angle))
(x0, y0) = ((self.xcenter + (btm * ticksin)), (self.ycenter + (btm * tickcos)))
(x1, y1) = ((self.xcenter + (top * ticksin)), (self.ycenter + (top * tickcos)))
scale_elements.append(Line(x0, y0, x1, y1, strokeColor=track.scale_color))
graph_label_min = []
graph_label_max = []
graph_label_mid = []
for graph in set.get_graphs():
quartiles = graph.quartiles()
(minval, maxval) = (quartiles[0], quartiles[4])
if (graph.center is None):
midval = ((maxval + minval) / 2.0)
graph_label_min.append(('%.3f' % minval))
graph_label_max.append(('%.3f' % maxval))
graph_label_mid.append(('%.3f' % midval))
else:
diff = max((graph.center - minval), (maxval - graph.center))
minval = (graph.center - diff)
maxval = (graph.center + diff)
midval = graph.center
graph_label_mid.append(('%.3f' % midval))
graph_label_min.append(('%.3f' % minval))
graph_label_max.append(('%.3f' % maxval))
(xmid, ymid) = (((x0 + x1) / 2.0), ((y0 + y1) / 2.0))
for (limit, x, y) in [(graph_label_min, x0, y0), (graph_label_max, x1, y1), (graph_label_mid, xmid, ymid)]:
label = String(0, 0, ';'.join(limit), fontName=track.scale_font, fontSize=track.scale_fontsize, fillColor=track.scale_color)
label.textAnchor = 'middle'
labelgroup = Group(label)
labelgroup.transform = (tickcos, (- ticksin), ticksin, tickcos, x, y)
scale_labels.append(labelgroup)
return (scale_elements, scale_labels) |
def draw_tick(self, tickpos, ctr, ticklen, track, draw_label):
' draw_tick(self, tickpos, ctr, ticklen) -> (element, element)\n\n o tickpos Int, position of the tick on the sequence\n\n o ctr Float, Y co-ord of the center of the track\n\n o ticklen How long to draw the tick\n\n o track Track, the track the tick is drawn on\n\n o draw_label Boolean, write the tick label?\n\n Returns a drawing element that is the tick on the scale\n '
(tickangle, tickcos, ticksin) = self.canvas_angle(tickpos)
(x0, y0) = ((self.xcenter + (ctr * ticksin)), (self.ycenter + (ctr * tickcos)))
(x1, y1) = ((self.xcenter + ((ctr + ticklen) * ticksin)), (self.ycenter + ((ctr + ticklen) * tickcos)))
tick = Line(x0, y0, x1, y1, strokeColor=track.scale_color)
if draw_label:
if (track.scale_format == 'SInt'):
if (tickpos >= 1000000):
tickstring = (str((tickpos // 1000000)) + ' Mbp')
elif (tickpos >= 1000):
tickstring = (str((tickpos // 1000)) + ' Kbp')
else:
tickstring = str(tickpos)
else:
tickstring = str(tickpos)
label = String(0, 0, tickstring, fontName=track.scale_font, fontSize=track.scale_fontsize, fillColor=track.scale_color)
if (tickangle > pi):
label.textAnchor = 'end'
labelgroup = Group(label)
labelgroup.transform = (1, 0, 0, 1, x1, y1)
else:
labelgroup = None
return (tick, labelgroup) | -8,173,748,203,641,748,000 | draw_tick(self, tickpos, ctr, ticklen) -> (element, element)
o tickpos Int, position of the tick on the sequence
o ctr Float, Y co-ord of the center of the track
o ticklen How long to draw the tick
o track Track, the track the tick is drawn on
o draw_label Boolean, write the tick label?
Returns a drawing element that is the tick on the scale | bin/last_wrapper/Bio/Graphics/GenomeDiagram/_CircularDrawer.py | draw_tick | LyonsLab/coge | python | def draw_tick(self, tickpos, ctr, ticklen, track, draw_label):
' draw_tick(self, tickpos, ctr, ticklen) -> (element, element)\n\n o tickpos Int, position of the tick on the sequence\n\n o ctr Float, Y co-ord of the center of the track\n\n o ticklen How long to draw the tick\n\n o track Track, the track the tick is drawn on\n\n o draw_label Boolean, write the tick label?\n\n Returns a drawing element that is the tick on the scale\n '
(tickangle, tickcos, ticksin) = self.canvas_angle(tickpos)
(x0, y0) = ((self.xcenter + (ctr * ticksin)), (self.ycenter + (ctr * tickcos)))
(x1, y1) = ((self.xcenter + ((ctr + ticklen) * ticksin)), (self.ycenter + ((ctr + ticklen) * tickcos)))
tick = Line(x0, y0, x1, y1, strokeColor=track.scale_color)
if draw_label:
if (track.scale_format == 'SInt'):
if (tickpos >= 1000000):
tickstring = (str((tickpos // 1000000)) + ' Mbp')
elif (tickpos >= 1000):
tickstring = (str((tickpos // 1000)) + ' Kbp')
else:
tickstring = str(tickpos)
else:
tickstring = str(tickpos)
label = String(0, 0, tickstring, fontName=track.scale_font, fontSize=track.scale_fontsize, fillColor=track.scale_color)
if (tickangle > pi):
label.textAnchor = 'end'
labelgroup = Group(label)
labelgroup.transform = (1, 0, 0, 1, x1, y1)
else:
labelgroup = None
return (tick, labelgroup) |
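
The tick geometry above relies on a polar-to-Cartesian mapping in which angles run clockwise from 12 o'clock, so x uses sin and y uses cos. A self-contained sketch (center and radii are illustrative):

```python
from math import sin, cos, pi

def tick_endpoints(xcenter, ycenter, ctr, ticklen, angle):
    # Inner end of the tick sits on the given track radius...
    x0, y0 = xcenter + ctr * sin(angle), ycenter + ctr * cos(angle)
    # ...and the outer end is pushed outwards by the tick length.
    x1 = xcenter + (ctr + ticklen) * sin(angle)
    y1 = ycenter + (ctr + ticklen) * cos(angle)
    return (x0, y0), (x1, y1)

print(tick_endpoints(0, 0, 100, 10, pi / 2))  # a tick at 3 o'clock
```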
def draw_test_tracks(self):
' draw_test_tracks(self)\n\n Draw blue circles indicating tracks to be drawn, with a green circle\n down the center.\n '
for track in self.drawn_tracks:
(btm, ctr, top) = self.track_radii[track]
self.drawing.add(Circle(self.xcenter, self.ycenter, top, strokeColor=colors.blue, fillColor=None))
self.drawing.add(Circle(self.xcenter, self.ycenter, ctr, strokeColor=colors.green, fillColor=None))
self.drawing.add(Circle(self.xcenter, self.ycenter, btm, strokeColor=colors.blue, fillColor=None)) | 7,809,708,974,303,596,000 | draw_test_tracks(self)
Draw blue circles indicating tracks to be drawn, with a green circle
down the center. | bin/last_wrapper/Bio/Graphics/GenomeDiagram/_CircularDrawer.py | draw_test_tracks | LyonsLab/coge | python | def draw_test_tracks(self):
' draw_test_tracks(self)\n\n Draw blue circles indicating tracks to be drawn, with a green circle\n down the center.\n '
for track in self.drawn_tracks:
(btm, ctr, top) = self.track_radii[track]
self.drawing.add(Circle(self.xcenter, self.ycenter, top, strokeColor=colors.blue, fillColor=None))
self.drawing.add(Circle(self.xcenter, self.ycenter, ctr, strokeColor=colors.green, fillColor=None))
self.drawing.add(Circle(self.xcenter, self.ycenter, btm, strokeColor=colors.blue, fillColor=None)) |
def draw_greytrack(self, track):
' draw_greytrack(self)\n\n o track Track object\n\n Put in a grey background to the current track, if the track\n specifies that we should\n '
greytrack_bgs = []
greytrack_labels = []
if (not track.greytrack):
return ([], [])
(btm, ctr, top) = self.track_radii[self.current_track_level]
if (self.sweep < 1):
bg = self._draw_arc(btm, top, 0, ((2 * pi) * self.sweep), colors.Color(0.96, 0.96, 0.96))
else:
bg = Circle(self.xcenter, self.ycenter, ctr, strokeColor=colors.Color(0.96, 0.96, 0.96), fillColor=None, strokeWidth=(top - btm))
greytrack_bgs.append(bg)
if track.greytrack_labels:
labelstep = (self.length // track.greytrack_labels)
for pos in range(self.start, self.end, labelstep):
label = String(0, 0, track.name, fontName=track.greytrack_font, fontSize=track.greytrack_fontsize, fillColor=track.greytrack_fontcolor)
(theta, costheta, sintheta) = self.canvas_angle(pos)
(x, y) = ((self.xcenter + (btm * sintheta)), (self.ycenter + (btm * costheta)))
labelgroup = Group(label)
labelangle = (((((self.sweep * 2) * pi) * (pos - self.start)) / self.length) - (pi / 2))
if (theta > pi):
label.textAnchor = 'end'
labelangle += pi
(cosA, sinA) = (cos(labelangle), sin(labelangle))
labelgroup.transform = (cosA, (- sinA), sinA, cosA, x, y)
if (not ((self.length - x) <= labelstep)):
greytrack_labels.append(labelgroup)
return (greytrack_bgs, greytrack_labels) | 3,037,287,966,127,989,000 | draw_greytrack(self)
o track Track object
Put in a grey background to the current track, if the track
specifies that we should | bin/last_wrapper/Bio/Graphics/GenomeDiagram/_CircularDrawer.py | draw_greytrack | LyonsLab/coge | python | def draw_greytrack(self, track):
' draw_greytrack(self)\n\n o track Track object\n\n Put in a grey background to the current track, if the track\n specifies that we should\n '
greytrack_bgs = []
greytrack_labels = []
if (not track.greytrack):
return ([], [])
(btm, ctr, top) = self.track_radii[self.current_track_level]
if (self.sweep < 1):
bg = self._draw_arc(btm, top, 0, ((2 * pi) * self.sweep), colors.Color(0.96, 0.96, 0.96))
else:
bg = Circle(self.xcenter, self.ycenter, ctr, strokeColor=colors.Color(0.96, 0.96, 0.96), fillColor=None, strokeWidth=(top - btm))
greytrack_bgs.append(bg)
if track.greytrack_labels:
labelstep = (self.length // track.greytrack_labels)
for pos in range(self.start, self.end, labelstep):
label = String(0, 0, track.name, fontName=track.greytrack_font, fontSize=track.greytrack_fontsize, fillColor=track.greytrack_fontcolor)
(theta, costheta, sintheta) = self.canvas_angle(pos)
(x, y) = ((self.xcenter + (btm * sintheta)), (self.ycenter + (btm * costheta)))
labelgroup = Group(label)
labelangle = (((((self.sweep * 2) * pi) * (pos - self.start)) / self.length) - (pi / 2))
if (theta > pi):
label.textAnchor = 'end'
labelangle += pi
(cosA, sinA) = (cos(labelangle), sin(labelangle))
labelgroup.transform = (cosA, (- sinA), sinA, cosA, x, y)
if (not ((self.length - x) <= labelstep)):
greytrack_labels.append(labelgroup)
return (greytrack_bgs, greytrack_labels) |
def canvas_angle(self, base):
' canvas_angle(self, base) -> (float, float, float)\n '
angle = ((((self.sweep * 2) * pi) * (base - self.start)) / self.length)
return (angle, cos(angle), sin(angle)) | -6,761,564,059,399,719,000 | canvas_angle(self, base) -> (float, float, float) | bin/last_wrapper/Bio/Graphics/GenomeDiagram/_CircularDrawer.py | canvas_angle | LyonsLab/coge | python | def canvas_angle(self, base):
' \n '
angle = ((((self.sweep * 2) * pi) * (base - self.start)) / self.length)
return (angle, cos(angle), sin(angle)) |
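
`canvas_angle` is the single place where sequence coordinates become angles: a base position is mapped linearly onto the drawn sweep. A standalone version for illustration:

```python
from math import pi, cos, sin

def canvas_angle(base, start, length, sweep=1.0):
    # Fraction of the sequence covered so far, scaled by the sweep
    # (sweep < 1 draws an incomplete circle).
    angle = (sweep * 2 * pi) * (base - start) / length
    return angle, cos(angle), sin(angle)

print(canvas_angle(2500, 0, 10000))  # quarter turn: (pi/2, ~0.0, 1.0)
```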
def _draw_arc(self, inner_radius, outer_radius, startangle, endangle, color, border=None, colour=None, **kwargs):
' draw_arc(self, inner_radius, outer_radius, startangle, endangle, color)\n -> Group\n\n o inner_radius Float distance of inside of arc from drawing center\n\n o outer_radius Float distance of outside of arc from drawing center\n\n o startangle Float angle subtended by start of arc at drawing center\n (in radians)\n\n o endangle Float angle subtended by end of arc at drawing center\n (in radians)\n\n o color colors.Color object for arc (overridden by backwards\n compatible argument with UK spelling, colour).\n\n Returns a closed path object describing an arced box corresponding to\n the passed values. For very small angles, a simple four sided\n polygon is used.\n '
if (colour is not None):
color = colour
if (border is None):
border = color
if (color is None):
color = colour
if ((color == colors.white) and (border is None)):
strokecolor = colors.black
elif (border is None):
strokecolor = color
elif (border is not None):
strokecolor = border
if (abs(float((endangle - startangle))) > 0.01):
p = ArcPath(strokeColor=strokecolor, fillColor=color, strokewidth=0)
p.addArc(self.xcenter, self.ycenter, inner_radius, (90 - ((endangle * 180) / pi)), (90 - ((startangle * 180) / pi)), moveTo=True)
p.addArc(self.xcenter, self.ycenter, outer_radius, (90 - ((endangle * 180) / pi)), (90 - ((startangle * 180) / pi)), reverse=True)
p.closePath()
return p
else:
(startcos, startsin) = (cos(startangle), sin(startangle))
(endcos, endsin) = (cos(endangle), sin(endangle))
(x0, y0) = (self.xcenter, self.ycenter)
(x1, y1) = ((x0 + (inner_radius * startsin)), (y0 + (inner_radius * startcos)))
(x2, y2) = ((x0 + (inner_radius * endsin)), (y0 + (inner_radius * endcos)))
(x3, y3) = ((x0 + (outer_radius * endsin)), (y0 + (outer_radius * endcos)))
(x4, y4) = ((x0 + (outer_radius * startsin)), (y0 + (outer_radius * startcos)))
return draw_polygon([(x1, y1), (x2, y2), (x3, y3), (x4, y4)], color, border) | 6,991,388,575,596,459,000 | draw_arc(self, inner_radius, outer_radius, startangle, endangle, color)
-> Group
o inner_radius Float distance of inside of arc from drawing center
o outer_radius Float distance of outside of arc from drawing center
o startangle Float angle subtended by start of arc at drawing center
(in radians)
o endangle Float angle subtended by end of arc at drawing center
(in radians)
o color colors.Color object for arc (overridden by backwards
compatible argument with UK spelling, colour).
Returns a closed path object describing an arced box corresponding to
the passed values. For very small angles, a simple four sided
polygon is used. | bin/last_wrapper/Bio/Graphics/GenomeDiagram/_CircularDrawer.py | _draw_arc | LyonsLab/coge | python | def _draw_arc(self, inner_radius, outer_radius, startangle, endangle, color, border=None, colour=None, **kwargs):
' draw_arc(self, inner_radius, outer_radius, startangle, endangle, color)\n -> Group\n\n o inner_radius Float distance of inside of arc from drawing center\n\n o outer_radius Float distance of outside of arc from drawing center\n\n o startangle Float angle subtended by start of arc at drawing center\n (in radians)\n\n o endangle Float angle subtended by end of arc at drawing center\n (in radians)\n\n o color colors.Color object for arc (overridden by backwards\n compatible argument with UK spelling, colour).\n\n Returns a closed path object describing an arced box corresponding to\n the passed values. For very small angles, a simple four sided\n polygon is used.\n '
if (colour is not None):
color = colour
if (border is None):
border = color
if (color is None):
color = colour
if ((color == colors.white) and (border is None)):
strokecolor = colors.black
elif (border is None):
strokecolor = color
elif (border is not None):
strokecolor = border
if (abs(float((endangle - startangle))) > 0.01):
p = ArcPath(strokeColor=strokecolor, fillColor=color, strokewidth=0)
p.addArc(self.xcenter, self.ycenter, inner_radius, (90 - ((endangle * 180) / pi)), (90 - ((startangle * 180) / pi)), moveTo=True)
p.addArc(self.xcenter, self.ycenter, outer_radius, (90 - ((endangle * 180) / pi)), (90 - ((startangle * 180) / pi)), reverse=True)
p.closePath()
return p
else:
(startcos, startsin) = (cos(startangle), sin(startangle))
(endcos, endsin) = (cos(endangle), sin(endangle))
(x0, y0) = (self.xcenter, self.ycenter)
(x1, y1) = ((x0 + (inner_radius * startsin)), (y0 + (inner_radius * startcos)))
(x2, y2) = ((x0 + (inner_radius * endsin)), (y0 + (inner_radius * endcos)))
(x3, y3) = ((x0 + (outer_radius * endsin)), (y0 + (outer_radius * endcos)))
(x4, y4) = ((x0 + (outer_radius * startsin)), (y0 + (outer_radius * startcos)))
return draw_polygon([(x1, y1), (x2, y2), (x3, y3), (x4, y4)], color, border) |
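
Note the repeated expression `90 - (angle * 180) / pi` in the `addArc` calls: the drawer measures angles in radians clockwise from 12 o'clock, while ReportLab arcs take degrees counter-clockwise from 3 o'clock. The conversion in isolation:

```python
from math import pi

def to_reportlab_degrees(angle):
    # Radians clockwise from 12 o'clock -> degrees CCW from 3 o'clock.
    return 90 - (angle * 180) / pi

print(to_reportlab_degrees(0))       # 90.0 (12 o'clock)
print(to_reportlab_degrees(pi / 2))  # 0.0  (3 o'clock)
```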
def _draw_arc_arrow(self, inner_radius, outer_radius, startangle, endangle, color, border=None, shaft_height_ratio=0.4, head_length_ratio=0.5, orientation='right', colour=None, **kwargs):
'Draw an arrow along an arc.'
if (colour is not None):
color = colour
if (border is None):
border = color
if (color is None):
color = colour
if ((color == colors.white) and (border is None)):
strokecolor = colors.black
elif (border is None):
strokecolor = color
elif (border is not None):
strokecolor = border
(startangle, endangle) = (min(startangle, endangle), max(startangle, endangle))
if ((orientation != 'left') and (orientation != 'right')):
raise ValueError(("Invalid orientation %s, should be 'left' or 'right'" % repr(orientation)))
angle = float((endangle - startangle))
middle_radius = (0.5 * (inner_radius + outer_radius))
boxheight = (outer_radius - inner_radius)
shaft_height = (boxheight * shaft_height_ratio)
shaft_inner_radius = (middle_radius - (0.5 * shaft_height))
shaft_outer_radius = (middle_radius + (0.5 * shaft_height))
headangle_delta = max(0.0, min(((abs(boxheight) * head_length_ratio) / middle_radius), abs(angle)))
if (angle < 0):
headangle_delta *= (- 1)
if (orientation == 'right'):
headangle = (endangle - headangle_delta)
else:
headangle = (startangle + headangle_delta)
if (startangle <= endangle):
headangle = max(min(headangle, endangle), startangle)
else:
headangle = max(min(headangle, startangle), endangle)
assert ((startangle <= headangle <= endangle) or (endangle <= headangle <= startangle)), (startangle, headangle, endangle, angle)
(startcos, startsin) = (cos(startangle), sin(startangle))
(headcos, headsin) = (cos(headangle), sin(headangle))
(endcos, endsin) = (cos(endangle), sin(endangle))
(x0, y0) = (self.xcenter, self.ycenter)
if ((0.5 >= abs(angle)) and (abs(headangle_delta) >= abs(angle))):
if (orientation == 'right'):
(x1, y1) = ((x0 + (inner_radius * startsin)), (y0 + (inner_radius * startcos)))
(x2, y2) = ((x0 + (outer_radius * startsin)), (y0 + (outer_radius * startcos)))
(x3, y3) = ((x0 + (middle_radius * endsin)), (y0 + (middle_radius * endcos)))
else:
(x1, y1) = ((x0 + (inner_radius * endsin)), (y0 + (inner_radius * endcos)))
(x2, y2) = ((x0 + (outer_radius * endsin)), (y0 + (outer_radius * endcos)))
(x3, y3) = ((x0 + (middle_radius * startsin)), (y0 + (middle_radius * startcos)))
return Polygon([x1, y1, x2, y2, x3, y3], strokeColor=(border or color), fillColor=color, strokeLineJoin=1, strokewidth=0)
elif (orientation == 'right'):
p = ArcPath(strokeColor=strokecolor, fillColor=color, strokeLineJoin=1, strokewidth=0, **kwargs)
p.addArc(self.xcenter, self.ycenter, shaft_inner_radius, (90 - ((headangle * 180) / pi)), (90 - ((startangle * 180) / pi)), moveTo=True)
p.addArc(self.xcenter, self.ycenter, shaft_outer_radius, (90 - ((headangle * 180) / pi)), (90 - ((startangle * 180) / pi)), reverse=True)
p.lineTo((x0 + (outer_radius * headsin)), (y0 + (outer_radius * headcos)))
if (abs(angle) < 0.5):
p.lineTo((x0 + (middle_radius * endsin)), (y0 + (middle_radius * endcos)))
p.lineTo((x0 + (inner_radius * headsin)), (y0 + (inner_radius * headcos)))
else:
dx = min(0.1, (abs(angle) / 50.0))
x = dx
while (x < 1):
r = (outer_radius - (x * (outer_radius - middle_radius)))
a = (headangle + (x * (endangle - headangle)))
p.lineTo((x0 + (r * sin(a))), (y0 + (r * cos(a))))
x += dx
p.lineTo((x0 + (middle_radius * endsin)), (y0 + (middle_radius * endcos)))
x = dx
while (x < 1):
r = (middle_radius - (x * (middle_radius - inner_radius)))
a = (headangle + ((1 - x) * (endangle - headangle)))
p.lineTo((x0 + (r * sin(a))), (y0 + (r * cos(a))))
x += dx
p.lineTo((x0 + (inner_radius * headsin)), (y0 + (inner_radius * headcos)))
p.closePath()
return p
else:
p = ArcPath(strokeColor=strokecolor, fillColor=color, strokeLineJoin=1, strokewidth=0, **kwargs)
p.addArc(self.xcenter, self.ycenter, shaft_inner_radius, (90 - ((endangle * 180) / pi)), (90 - ((headangle * 180) / pi)), moveTo=True, reverse=True)
p.addArc(self.xcenter, self.ycenter, shaft_outer_radius, (90 - ((endangle * 180) / pi)), (90 - ((headangle * 180) / pi)), reverse=False)
p.lineTo((x0 + (outer_radius * headsin)), (y0 + (outer_radius * headcos)))
if (abs(angle) < 0.5):
p.lineTo((x0 + (middle_radius * startsin)), (y0 + (middle_radius * startcos)))
p.lineTo((x0 + (inner_radius * headsin)), (y0 + (inner_radius * headcos)))
else:
dx = min(0.1, (abs(angle) / 50.0))
x = dx
while (x < 1):
r = (outer_radius - (x * (outer_radius - middle_radius)))
a = (headangle + (x * (startangle - headangle)))
p.lineTo((x0 + (r * sin(a))), (y0 + (r * cos(a))))
x += dx
p.lineTo((x0 + (middle_radius * startsin)), (y0 + (middle_radius * startcos)))
x = dx
while (x < 1):
r = (middle_radius - (x * (middle_radius - inner_radius)))
a = (headangle + ((1 - x) * (startangle - headangle)))
p.lineTo((x0 + (r * sin(a))), (y0 + (r * cos(a))))
x += dx
p.lineTo((x0 + (inner_radius * headsin)), (y0 + (inner_radius * headcos)))
p.closePath()
return p | -7,027,955,583,085,468,000 | Draw an arrow along an arc. | bin/last_wrapper/Bio/Graphics/GenomeDiagram/_CircularDrawer.py | _draw_arc_arrow | LyonsLab/coge | python | def _draw_arc_arrow(self, inner_radius, outer_radius, startangle, endangle, color, border=None, shaft_height_ratio=0.4, head_length_ratio=0.5, orientation='right', colour=None, **kwargs):
if (colour is not None):
color = colour
if (border is None):
border = color
if (color is None):
color = colour
if ((color == colors.white) and (border is None)):
strokecolor = colors.black
elif (border is None):
strokecolor = color
elif (border is not None):
strokecolor = border
(startangle, endangle) = (min(startangle, endangle), max(startangle, endangle))
if ((orientation != 'left') and (orientation != 'right')):
raise ValueError(("Invalid orientation %s, should be 'left' or 'right'" % repr(orientation)))
angle = float((endangle - startangle))
middle_radius = (0.5 * (inner_radius + outer_radius))
boxheight = (outer_radius - inner_radius)
shaft_height = (boxheight * shaft_height_ratio)
shaft_inner_radius = (middle_radius - (0.5 * shaft_height))
shaft_outer_radius = (middle_radius + (0.5 * shaft_height))
headangle_delta = max(0.0, min(((abs(boxheight) * head_length_ratio) / middle_radius), abs(angle)))
if (angle < 0):
headangle_delta *= (- 1)
if (orientation == 'right'):
headangle = (endangle - headangle_delta)
else:
headangle = (startangle + headangle_delta)
if (startangle <= endangle):
headangle = max(min(headangle, endangle), startangle)
else:
headangle = max(min(headangle, startangle), endangle)
assert ((startangle <= headangle <= endangle) or (endangle <= headangle <= startangle)), (startangle, headangle, endangle, angle)
(startcos, startsin) = (cos(startangle), sin(startangle))
(headcos, headsin) = (cos(headangle), sin(headangle))
(endcos, endsin) = (cos(endangle), sin(endangle))
(x0, y0) = (self.xcenter, self.ycenter)
if ((0.5 >= abs(angle)) and (abs(headangle_delta) >= abs(angle))):
if (orientation == 'right'):
(x1, y1) = ((x0 + (inner_radius * startsin)), (y0 + (inner_radius * startcos)))
(x2, y2) = ((x0 + (outer_radius * startsin)), (y0 + (outer_radius * startcos)))
(x3, y3) = ((x0 + (middle_radius * endsin)), (y0 + (middle_radius * endcos)))
else:
(x1, y1) = ((x0 + (inner_radius * endsin)), (y0 + (inner_radius * endcos)))
(x2, y2) = ((x0 + (outer_radius * endsin)), (y0 + (outer_radius * endcos)))
(x3, y3) = ((x0 + (middle_radius * startsin)), (y0 + (middle_radius * startcos)))
return Polygon([x1, y1, x2, y2, x3, y3], strokeColor=(border or color), fillColor=color, strokeLineJoin=1, strokewidth=0)
elif (orientation == 'right'):
p = ArcPath(strokeColor=strokecolor, fillColor=color, strokeLineJoin=1, strokewidth=0, **kwargs)
p.addArc(self.xcenter, self.ycenter, shaft_inner_radius, (90 - ((headangle * 180) / pi)), (90 - ((startangle * 180) / pi)), moveTo=True)
p.addArc(self.xcenter, self.ycenter, shaft_outer_radius, (90 - ((headangle * 180) / pi)), (90 - ((startangle * 180) / pi)), reverse=True)
p.lineTo((x0 + (outer_radius * headsin)), (y0 + (outer_radius * headcos)))
if (abs(angle) < 0.5):
p.lineTo((x0 + (middle_radius * endsin)), (y0 + (middle_radius * endcos)))
p.lineTo((x0 + (inner_radius * headsin)), (y0 + (inner_radius * headcos)))
else:
dx = min(0.1, (abs(angle) / 50.0))
x = dx
while (x < 1):
r = (outer_radius - (x * (outer_radius - middle_radius)))
a = (headangle + (x * (endangle - headangle)))
p.lineTo((x0 + (r * sin(a))), (y0 + (r * cos(a))))
x += dx
p.lineTo((x0 + (middle_radius * endsin)), (y0 + (middle_radius * endcos)))
x = dx
while (x < 1):
r = (middle_radius - (x * (middle_radius - inner_radius)))
a = (headangle + ((1 - x) * (endangle - headangle)))
p.lineTo((x0 + (r * sin(a))), (y0 + (r * cos(a))))
x += dx
p.lineTo((x0 + (inner_radius * headsin)), (y0 + (inner_radius * headcos)))
p.closePath()
return p
else:
p = ArcPath(strokeColor=strokecolor, fillColor=color, strokeLineJoin=1, strokewidth=0, **kwargs)
p.addArc(self.xcenter, self.ycenter, shaft_inner_radius, (90 - ((endangle * 180) / pi)), (90 - ((headangle * 180) / pi)), moveTo=True, reverse=True)
p.addArc(self.xcenter, self.ycenter, shaft_outer_radius, (90 - ((endangle * 180) / pi)), (90 - ((headangle * 180) / pi)), reverse=False)
p.lineTo((x0 + (outer_radius * headsin)), (y0 + (outer_radius * headcos)))
if (abs(angle) < 0.5):
p.lineTo((x0 + (middle_radius * startsin)), (y0 + (middle_radius * startcos)))
p.lineTo((x0 + (inner_radius * headsin)), (y0 + (inner_radius * headcos)))
else:
dx = min(0.1, (abs(angle) / 50.0))
x = dx
while (x < 1):
r = (outer_radius - (x * (outer_radius - middle_radius)))
a = (headangle + (x * (startangle - headangle)))
p.lineTo((x0 + (r * sin(a))), (y0 + (r * cos(a))))
x += dx
p.lineTo((x0 + (middle_radius * startsin)), (y0 + (middle_radius * startcos)))
x = dx
while (x < 1):
r = (middle_radius - (x * (middle_radius - inner_radius)))
a = (headangle + ((1 - x) * (startangle - headangle)))
p.lineTo((x0 + (r * sin(a))), (y0 + (r * cos(a))))
x += dx
p.lineTo((x0 + (inner_radius * headsin)), (y0 + (inner_radius * headcos)))
p.closePath()
return p |
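
The arrow-head length in `_draw_arc_arrow` is derived from the box height and clamped so it can never overshoot the feature's own angular span; extracted as a standalone helper:

```python
def head_angle_delta(boxheight, head_length_ratio, middle_radius, angle):
    # Head length expressed as an angle at the middle radius, capped at
    # the feature's full span; the sign follows the drawing direction.
    delta = max(0.0, min(abs(boxheight) * head_length_ratio / middle_radius,
                         abs(angle)))
    return -delta if angle < 0 else delta
```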
def all_categories_grouping(row: pd.Series) -> str:
'\n Merge Category, Fuel and Segment into a single string for unique categorization\n '
if (row['Fuel'] == 'Battery Electric'):
return ((row['Category'] + ' / ') + row['Fuel'])
else:
try:
result = ((((row['Fuel'] + ' / ') + row['Segment']) + ' / ') + row['Euro Standard'])
except TypeError:  # Segment or Euro Standard may be missing (None/NaN)
result = row['Fuel']
return result | -6,056,978,028,939,857,000 | Merge Category, Fuel and Segment into a single string for unique categorization | Graphing/MeanActivityHorizontalBarChart.py | all_categories_grouping | actuatech/fuel-tourism | python | def all_categories_grouping(row: pd.Series) -> str:
'\n \n '
if (row['Fuel'] == 'Battery Electric'):
return ((row['Category'] + ' / ') + row['Fuel'])
else:
try:
result = ((((row['Fuel'] + ' / ') + row['Segment']) + ' / ') + row['Euro Standard'])
except TypeError:  # Segment or Euro Standard may be missing (None/NaN)
result = row['Fuel']
return result |
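
A small usage sketch for the grouping above, with hypothetical rows: battery-electric vehicles collapse to `Category / Fuel`, other rows get the full `Fuel / Segment / Euro Standard` key, and rows with a missing Euro Standard fall back to the fuel alone.

```python
import pandas as pd

rows = pd.DataFrame([
    {'Category': 'Passenger Cars', 'Fuel': 'Battery Electric',
     'Segment': 'Medium', 'Euro Standard': None},
    {'Category': 'Passenger Cars', 'Fuel': 'Petrol',
     'Segment': 'Small', 'Euro Standard': 'Euro 6'},
])
print(rows.apply(all_categories_grouping, axis=1).tolist())
# -> ['Passenger Cars / Battery Electric', 'Petrol / Small / Euro 6']
```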
def activity_horizontal_bar_chart(stock_and_mileage_df: pd.DataFrame.groupby, output_folder):
'\n Horizontal bar chart representing mean activity and other activities per unique categorization\n\n :param stock_and_mileage_df: Dataframe of the vehicles registration list\n :param output_folder: output folder name where to store resulting chart\n :return: an html file containing the horizontal bar chart of the mean activity\n '
data = stock_and_mileage_df.copy()
data = data[(data['Category'] != 'Off Road')]
data['segmentation'] = data.apply((lambda row: all_categories_grouping(row)), axis=1)
horizontal_plot = go.Figure()
horizontal_plot.add_trace(go.Scatter(y=data['segmentation'], x=data['Max_Activity'], mode='markers', name='Activitat màxima', marker_color='rgb(288, 26, 28)'))
horizontal_plot.add_trace(go.Scatter(y=data['segmentation'], x=data['Min_Activity'], mode='markers', name='Activitat mínima', marker_color='rgb(229, 196, 148)'))
horizontal_plot.add_trace(go.Scatter(y=data['segmentation'], x=data['Std_Activity'], mode='markers', name="Desviació standard de l'activitat", marker=dict(color='rgb(800, 800, 800)', opacity=0)))
horizontal_plot.add_trace(go.Scatter(y=data['segmentation'], x=data['Stock'], mode='markers', name='Estoc', marker=dict(color='rgb(800, 800, 800)', opacity=0)))
horizontal_plot.add_trace(go.Scatter(y=data['segmentation'], x=data['Mean_Lifetime_Activity'], mode='markers', name='Lifetime cumulative activity mitja', marker=dict(color='rgb(800, 800, 800)', opacity=0)))
for category in CATEGORIES:
horizontal_plot.add_trace(go.Bar(y=data[(data['Category'] == category)]['segmentation'], x=data[(data['Category'] == category)]['Mean_Activity'], orientation='h', marker_color=COLOR_DISCRETE_MAP[category], name=f'Activitat mitjana {category}'))
horizontal_plot.update_layout(title="Activitat mitjana anual segons classificació del parc de vehicles d'Andorra", title_x=0.5, height=4000, width=1500, template='plotly_white', xaxis_title='Activitat mitja (km/any)', yaxis_title='Tipologia de vehicle', hovermode='y unified', hoverlabel=dict(namelength=100), xaxis_range=[0, (stock_and_mileage_df['Max_Activity'].max() * 1.05)], xaxis=dict(tickmode='array', tickvals=[0, 5000, 15000, 25000, 50000, 100000, 150000, 200000], ticktext=['0', '5k', '15k', '25k', '50k', '100k', '150k', '200k']))
horizontal_plot.update_xaxes(showgrid=True, zeroline=True)
horizontal_plot.show()
filename = (output_folder + "Activitat mitjana anual segons classificació del parc de vehicles d'Andorra.html")
horizontal_plot.write_html(filename) | -8,935,870,598,162,960,000 | Horizontal bar chart representing mean activity and other activities per unique categorization
:param stock_and_mileage_df: Dataframe of the vehicles registration list
:param output_folder: output folder name where to store resulting chart
:return: an html file containing the horizontal bar chart of the mean activity | Graphing/MeanActivityHorizontalBarChart.py | activity_horizontal_bar_chart | actuatech/fuel-tourism | python | def activity_horizontal_bar_chart(stock_and_mileage_df: pd.DataFrame.groupby, output_folder):
'\n Horizontal bar chart representing mean activity and other activities per unique categorization\n\n :param stock_and_mileage_df: Dataframe of the vehicles registration list\n :param output_folder: output folder name where to store resulting chart\n :return: an html file containing the horizontal bar chart of the mean activity\n '
data = stock_and_mileage_df.copy()
data = data[(data['Category'] != 'Off Road')]
data['segmentation'] = data.apply((lambda row: all_categories_grouping(row)), axis=1)
horizontal_plot = go.Figure()
horizontal_plot.add_trace(go.Scatter(y=data['segmentation'], x=data['Max_Activity'], mode='markers', name='Activitat màxima', marker_color='rgb(288, 26, 28)'))
horizontal_plot.add_trace(go.Scatter(y=data['segmentation'], x=data['Min_Activity'], mode='markers', name='Activitat mínima', marker_color='rgb(229, 196, 148)'))
horizontal_plot.add_trace(go.Scatter(y=data['segmentation'], x=data['Std_Activity'], mode='markers', name="Desviació standard de l'activitat", marker=dict(color='rgb(800, 800, 800)', opacity=0)))
horizontal_plot.add_trace(go.Scatter(y=data['segmentation'], x=data['Stock'], mode='markers', name='Estoc', marker=dict(color='rgb(800, 800, 800)', opacity=0)))
horizontal_plot.add_trace(go.Scatter(y=data['segmentation'], x=data['Mean_Lifetime_Activity'], mode='markers', name='Lifetime cumulative activity mitja', marker=dict(color='rgb(800, 800, 800)', opacity=0)))
for category in CATEGORIES:
horizontal_plot.add_trace(go.Bar(y=data[(data['Category'] == category)]['segmentation'], x=data[(data['Category'] == category)]['Mean_Activity'], orientation='h', marker_color=COLOR_DISCRETE_MAP[category], name=f'Activitat mitjana {category}'))
horizontal_plot.update_layout(title="Activitat mitjana anual segons classificació del parc de vehicles d'Andorra", title_x=0.5, height=4000, width=1500, template='plotly_white', xaxis_title='Activitat mitja (km/any)', yaxis_title='Tipologia de vehicle', hovermode='y unified', hoverlabel=dict(namelength=100), xaxis_range=[0, (stock_and_mileage_df['Max_Activity'].max() * 1.05)], xaxis=dict(tickmode='array', tickvals=[0, 5000, 15000, 25000, 50000, 100000, 150000, 200000], ticktext=['0', '5k', '15k', '25k', '50k', '100k', '150k', '200k']))
horizontal_plot.update_xaxes(showgrid=True, zeroline=True)
horizontal_plot.show()
filename = (output_folder + "Activitat mitjana anual segons classificació del parc de vehicles d'Andorra.html")
horizontal_plot.write_html(filename) |
def handle(event, context):
'\n Called by a module when it is deployed to register it\n :param dict event:\n :param context:\n :return dict:\n '
return RegisterModuleHandler().handle(event, context) | -8,657,518,357,971,014,000 | Called by a module when it is deployed to register it
:param dict event:
:param context:
:return dict: | functions/register_module/main.py | handle | WycliffeAssociates/tx-manager | python | def handle(event, context):
'\n Called by a module when it is deployed to register it\n :param dict event:\n :param context:\n :return dict:\n '
return RegisterModuleHandler().handle(event, context) |
def add(self, outputs, targets):
'\n Adding metric for each batch\n\n :param outputs: outputs of the model\n :param targets: targets of the model\n '
raise NotImplementedError() | -2,640,453,835,389,625,300 | Adding metric for each batch
:param outputs: outputs of the model
:param targets: targets of the model | facade_project/utils/ml_utils.py | add | gregunz/FacadeParsing | python | def add(self, outputs, targets):
'\n Adding metric for each batch\n\n :param outputs: outputs of the model\n :param targets: targets of the model\n '
raise NotImplementedError() |
def compute(self, phase):
"\n Aggregate accumulated metrics over batches at the end of the epoch\n\n :param phase: either 'train' or 'val'\n "
raise NotImplementedError() | -6,180,832,812,341,854,000 | Aggregate accumulated metrics over batches at the end of the epoch
:param phase: either 'train' or 'val' | facade_project/utils/ml_utils.py | compute | gregunz/FacadeParsing | python | def compute(self, phase):
"\n Aggregate accumulated metrics over batches at the end of the epoch\n\n :param phase: either 'train' or 'val'\n "
raise NotImplementedError() |
def description(self, phase):
"\n Description of the current metrics\n\n :param phase: either 'train' or 'val'\n :return: str\n "
raise NotImplementedError() | -7,212,118,863,517,066,000 | Description of the current metrics
:param phase: either 'train' or 'val'
:return: str | facade_project/utils/ml_utils.py | description | gregunz/FacadeParsing | python | def description(self, phase):
"\n Description of the current metrics\n\n :param phase: either 'train' or 'val'\n :return: str\n "
raise NotImplementedError() |
def scalar_infos(self, phase):
"\n Return list of tuple to use with tensorboard writer object 'add_scalar' function\n\n :param phase: either 'train' or 'val'\n :return: [tuple(str, number)]\n "
raise NotImplementedError() | -7,094,517,346,417,059,000 | Return list of tuple to use with tensorboard writer object 'add_scalar' function
:param phase: either 'train' or 'val'
:return: [tuple(str, number)] | facade_project/utils/ml_utils.py | scalar_infos | gregunz/FacadeParsing | python | def scalar_infos(self, phase):
"\n Return list of tuple to use with tensorboard writer object 'add_scalar' function\n\n :param phase: either 'train' or 'val'\n :return: [tuple(str, number)]\n "
raise NotImplementedError() |
def description_best(self):
'\n Description of the best metrics\n\n :return: str\n '
raise NotImplementedError() | 851,480,759,500,827,000 | Description of the best metrics
:return: str | facade_project/utils/ml_utils.py | description_best | gregunz/FacadeParsing | python | def description_best(self):
'\n Description of the best metrics\n\n :return: str\n '
raise NotImplementedError() |
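
The five methods above define an abstract per-epoch metric interface for a training loop. Below is a minimal concrete implementation as a sketch: the base-class name `Metric` is assumed (it is not visible in this extract), and `outputs`/`targets` are taken to be PyTorch tensors.

```python
class Accuracy(Metric):  # base-class name assumed
    def __init__(self):
        self.correct, self.total = 0, 0
        self.last = {'train': 0.0, 'val': 0.0}
        self.best = {'train': 0.0, 'val': 0.0}

    def add(self, outputs, targets):
        # Accumulate per-batch counts within the current epoch.
        preds = outputs.argmax(dim=1)
        self.correct += (preds == targets).sum().item()
        self.total += targets.numel()

    def compute(self, phase):
        # Aggregate at the end of the epoch, then reset the counters.
        self.last[phase] = self.correct / max(self.total, 1)
        self.best[phase] = max(self.best[phase], self.last[phase])

        self.correct, self.total = 0, 0

    def description(self, phase):
        return '{} accuracy: {:.3f}'.format(phase, self.last[phase])

    def scalar_infos(self, phase):
        # Tuples suitable for SummaryWriter.add_scalar(tag, value).
        return [('accuracy/{}'.format(phase), self.last[phase])]

    def description_best(self):
        return 'best val accuracy: {:.3f}'.format(self.best['val'])
```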
def handle_rpc_errors(fnc):
'Decorator to add more context to RPC errors'
@wraps(fnc)
def wrapper(*args, **kwargs):
try:
return fnc(*args, **kwargs)
except grpc.RpcError as exc:
exc.code().value
exc.details()
if (exc.code() == grpc.StatusCode.UNIMPLEMENTED):
print('unimplemented')
raise exc
elif (exc.code() == grpc.StatusCode.UNAVAILABLE):
print('UNAVAILABLE')
print(f'ERROR MESSAGE: {exc.details()}')
elif ((exc.code() == grpc.StatusCode.UNKNOWN) and (exc.details() == 'wallet locked, unlock it to enable full RPC access')):
print('WALLET IS LOCKED!')
raise exc
elif (exc.code() == grpc.StatusCode.UNKNOWN):
print('unknown')
print(f'ERROR MESSAGE: {exc.details()}')
elif (exc.code() == grpc.StatusCode.NOT_FOUND):
print('NOT FOUND')
print(f'ERROR MESSAGE: {exc.details()}')
elif (exc.code() == grpc.StatusCode.PERMISSION_DENIED):
print('PERMISSION_DENIED')
print(f'ERROR MESSAGE: {exc.details()}')
else:
raise exc
return exc
except Exception as exc:
print('unknown exception')
print(exc)
return wrapper | 5,801,486,384,072,298,000 | Decorator to add more context to RPC errors | lndgrpc/errors.py | handle_rpc_errors | ibz/lnd-grpc-client | python | def handle_rpc_errors(fnc):
@wraps(fnc)
def wrapper(*args, **kwargs):
try:
return fnc(*args, **kwargs)
except grpc.RpcError as exc:
exc.code().value
exc.details()
if (exc.code() == grpc.StatusCode.UNIMPLEMENTED):
print('unimplemented')
raise exc
elif (exc.code() == grpc.StatusCode.UNAVAILABLE):
print('UNAVAILABLE')
print(f'ERROR MESSAGE: {exc.details()}')
elif ((exc.code() == grpc.StatusCode.UNKNOWN) and (exc.details() == 'wallet locked, unlock it to enable full RPC access')):
print('WALLET IS LOCKED!')
raise exc
elif (exc.code() == grpc.StatusCode.UNKNOWN):
print('unknown')
print(f'ERROR MESSAGE: {exc.details()}')
elif (exc.code() == grpc.StatusCode.NOT_FOUND):
print('NOT FOUND')
print(f'ERROR MESSAGE: {exc.details()}')
elif (exc.code() == grpc.StatusCode.PERMISSION_DENIED):
print('PERMISSION_DENIED')
print(f'ERROR MESSAGE: {exc.details()}')
else:
raise exc
return exc
except Exception as exc:
print('unknown exception')
print(exc)
return wrapper |
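
Typical usage of the decorator above: wrap any function that issues gRPC calls so `RpcError` codes are reported with extra context. The stub and request below are placeholders, not part of this extract.

```python
@handle_rpc_errors
def get_node_info(stub, request):
    # stub/request stand in for a real gRPC stub and message type.
    return stub.GetInfo(request)
```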
@receiver(project_import)
def handle_project_import(sender, **kwargs):
'Add post-commit hook on project import'
project = sender
request = kwargs.get('request')
for service_cls in registry:
if service_cls.is_project_service(project):
service = service_cls.for_user(request.user)
if (service is not None):
if service.setup_webhook(project):
messages.success(request, _('Webhook activated'))
else:
messages.error(request, _('Webhook configuration failed')) | 1,579,200,990,597,520,600 | Add post-commit hook on project import | readthedocs/projects/signals.py | handle_project_import | ank-forked/readthedocs.org | python | @receiver(project_import)
def handle_project_import(sender, **kwargs):
project = sender
request = kwargs.get('request')
for service_cls in registry:
if service_cls.is_project_service(project):
service = service_cls.for_user(request.user)
if (service is not None):
if service.setup_webhook(project):
messages.success(request, _('Webhook activated'))
else:
messages.error(request, _('Webhook configuration failed')) |
def validate(self, data):
'Verify passwords match.'
passwd = data['password']
passwd_conf = data['password_confirmation']
if (passwd != passwd_conf):
raise serializers.ValidationError("Passwords don't match.")
password_validation.validate_password(passwd)
return data | -5,031,918,613,459,909,000 | Verify passwords match. | cride/users/serializers/users.py | validate | eocode/Rider-App | python | def validate(self, data):
passwd = data['password']
passwd_conf = data['password_confirmation']
if (passwd != passwd_conf):
raise serializers.ValidationError("Passwords don't match.")
password_validation.validate_password(passwd)
return data |
def create(self, data):
'Handle user and profile creation.'
data.pop('password_confirmation')
user = User.objects.create_user(**data, is_verified=False, is_client=True)
profile = Profile.objects.create(user=user)
send_confirmation_email.delay(user_pk=user.pk)
return user | -5,235,343,287,763,265,000 | Handle user and profile creation. | cride/users/serializers/users.py | create | eocode/Rider-App | python | def create(self, data):
data.pop('password_confirmation')
user = User.objects.create_user(**data, is_verified=False, is_client=True)
profile = Profile.objects.create(user=user)
send_confirmation_email.delay(user_pk=user.pk)
return user |
def validate(self, data):
'Check credentials'
user = authenticate(username=data['email'], password=data['password'])
if (not user):
raise serializers.ValidationError('Invalid credentials')
if (not user.is_verified):
raise serializers.ValidationError('Account is not active yet')
self.context['user'] = user
return data | 7,133,942,126,708,929,000 | Check credentials | cride/users/serializers/users.py | validate | eocode/Rider-App | python | def validate(self, data):
user = authenticate(username=data['email'], password=data['password'])
if (not user):
raise serializers.ValidationError('Invalid credentials')
if (not user.is_verified):
raise serializers.ValidationError('Account is not active yet')
self.context['user'] = user
return data |
def create(self, data):
'Generate or retrieve new token'
(token, created) = Token.objects.get_or_create(user=self.context['user'])
return (self.context['user'], token.key) | 4,092,564,994,993,324,000 | Generate or retrieve new token | cride/users/serializers/users.py | create | eocode/Rider-App | python | def create(self, data):
(token, created) = Token.objects.get_or_create(user=self.context['user'])
return (self.context['user'], token.key) |
def validate_token(self, data):
'Verify token is valid'
try:
payload = jwt.decode(data, settings.SECRET_KEY, algorithms=['HS256'])
except jwt.ExpiredSignatureError:
raise serializers.ValidationError('Verification link has expired.')
except jwt.exceptions.PyJWTError:
raise serializers.ValidationError('Invalid token')
if (payload['type'] != 'email_confirmation'):
raise serializers.ValidationError('Invalid token')
self.context['payload'] = payload
return data | 7,065,096,429,960,375,000 | Verify token is valid | cride/users/serializers/users.py | validate_token | eocode/Rider-App | python | def validate_token(self, data):
try:
payload = jwt.decode(data, settings.SECRET_KEY, algorithms=['HS256'])
except jwt.ExpiredSignatureError:
raise serializers.ValidationError('Verification link has expired.')
except jwt.exceptions.PyJWTError:
raise serializers.ValidationError('Invalid token')
if (payload['type'] != 'email_confirmation'):
raise serializers.ValidationError('Invalid token')
self.context['payload'] = payload
return data |
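
For context, a sketch of the kind of token this validator accepts, inferred from the checks above (`exp` from the expiry handling, `type` and `user` from the payload reads); the three-day window is an assumption.

```python
import jwt
from datetime import datetime, timedelta
from django.conf import settings

def gen_verification_token(user):
    payload = {
        'user': user.username,                         # read back in save()
        'exp': datetime.utcnow() + timedelta(days=3),  # assumed window
        'type': 'email_confirmation',                  # checked in validate_token()
    }
    return jwt.encode(payload, settings.SECRET_KEY, algorithm='HS256')
```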
def save(self):
"Update user's verified status"
payload = self.context['payload']
user = User.objects.get(username=payload['user'])
user.is_verified = True
user.save() | 5,789,362,526,885,159,000 | Update user's verified status | cride/users/serializers/users.py | save | eocode/Rider-App | python | def save(self):
payload = self.context['payload']
user = User.objects.get(username=payload['user'])
user.is_verified = True
user.save() |
def new_style(self, config):
'Install a new (stacked) parser style.\n\n This feature is currently experimental but should mimic the\n previous behaviour with regard to TAG_STYLE, START_TAG, END_TAG,\n etc.\n '
if self.style:
style = self.style[(- 1)]
else:
style = DEFAULT_STYLE
style = style.copy()
tagstyle = config.get('TAG_STYLE')
if tagstyle:
tags = TAG_STYLE.get(tagstyle)
if (tags is None):
raise Error(('Invalid tag style: %s' % tagstyle))
(start, end) = tags
config['START_TAG'] = config.get('START_TAG', start)
config['END_TAG'] = config.get('END_TAG', end)
for key in DEFAULT_STYLE.keys():
value = config.get(key)
if (value is not None):
style[key] = value
self.style.append(style)
return style | -3,532,283,925,373,133,000 | Install a new (stacked) parser style.
This feature is currently experimental but should mimic the
previous behaviour with regard to TAG_STYLE, START_TAG, END_TAG,
etc. | template/parser.py | new_style | lmr/Template-Toolkit-Python | python | def new_style(self, config):
'Install a new (stacked) parser style.\n\n This feature is currently experimental but should mimic the\n previous behaviour with regard to TAG_STYLE, START_TAG, END_TAG,\n etc.\n '
if self.style:
style = self.style[(- 1)]
else:
style = DEFAULT_STYLE
style = style.copy()
tagstyle = config.get('TAG_STYLE')
if tagstyle:
tags = TAG_STYLE.get(tagstyle)
if (tags is None):
raise Error(('Invalid tag style: %s' % tagstyle))
(start, end) = tags
config['START_TAG'] = config.get('START_TAG', start)
config['END_TAG'] = config.get('END_TAG', end)
for key in DEFAULT_STYLE.keys():
value = config.get(key)
if (value is not None):
style[key] = value
self.style.append(style)
return style |
def old_style(self):
'Pop the current parser style and revert to the previous one.\n\n See new_style(). ** experimental **\n '
if (len(self.style) <= 1):
raise Error('only 1 parser style remaining')
self.style.pop()
return self.style[(- 1)] | -365,749,488,424,430,850 | Pop the current parser style and revert to the previous one.
See new_style(). ** experimental ** | template/parser.py | old_style | lmr/Template-Toolkit-Python | python | def old_style(self):
'Pop the current parser style and revert to the previous one.\n\n See new_style(). ** experimental **\n '
if (len(self.style) <= 1):
raise Error('only 1 parser style remaining')
self.style.pop()
return self.style[(- 1)] |
def location(self):
'Return Python comment indicating current parser file and line.'
if (not self.file_info):
return '\n'
line = self.line
info = self.fileinfo[(- 1)]
file = ((info and (info.path or info.name)) or '(unknown template)')
line = re.sub('-.*', '', str(line))
return ('#line %s "%s"\n' % (line, file)) | -7,369,179,262,191,341,000 | Return Python comment indicating current parser file and line. | template/parser.py | location | lmr/Template-Toolkit-Python | python | def location(self):
if (not self.file_info):
return '\n'
line = self.line
info = self.fileinfo[(- 1)]
file = ((info and (info.path or info.name)) or '(unknown template)')
line = re.sub('-.*', '', str(line))
return ('#line %s "%s"\n' % (line, file)) |
def parse(self, text, info=None):
'Parses the text string, text, and returns a dictionary\n representing the compiled template block(s) as Python code, in the\n format expected by template.document.\n '
self.defblock = {}
self.metadata = {}
tokens = self.split_text(text)
if (tokens is None):
return None
self.fileinfo.append(info)
block = self._parse(tokens, info)
self.fileinfo.pop()
if block:
return {'BLOCK': block, 'DEFBLOCKS': self.defblock, 'METADATA': self.metadata}
else:
return None | 5,183,685,720,271,077,000 | Parses the text string, text, and returns a dictionary
representing the compiled template block(s) as Python code, in the
format expected by template.document. | template/parser.py | parse | lmr/Template-Toolkit-Python | python | def parse(self, text, info=None):
'Parses the text string, text, and returns a dictionary\n representing the compiled template block(s) as Python code, in the\n format expected by template.document.\n '
self.defblock = {}
self.metadata = {}
tokens = self.split_text(text)
if (tokens is None):
return None
self.fileinfo.append(info)
block = self._parse(tokens, info)
self.fileinfo.pop()
if block:
return {'BLOCK': block, 'DEFBLOCKS': self.defblock, 'METADATA': self.metadata}
else:
return None |
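
A hedged usage sketch for `parse` (the `Parser` class name and config handling are assumed from the surrounding file): feed raw template text in and get back the compiled block plus any named BLOCK definitions and META data.

```python
parser = Parser({'INTERPOLATE': 1})    # class name/config assumed
compiled = parser.parse('Hello [% name %]')
if compiled:
    block = compiled['BLOCK']          # compiled Python code
    defblocks = compiled['DEFBLOCKS']  # any [% BLOCK x %] definitions
    meta = compiled['METADATA']        # any [% META ... %] items
```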
def split_text(self, text):
'Split input template text into directives and raw text chunks.'
tokens = []
line = 1
style = self.style[(- 1)]
def make_splitter(delims):
return re.compile(('(?s)(.*?)%s(.*?)%s' % delims))
splitter = make_splitter((style['START_TAG'], style['END_TAG']))
while True:
match = splitter.match(text)
if (not match):
break
text = text[match.end():]
(pre, dir) = (match.group(1), match.group(2))
prelines = pre.count('\n')
dirlines = dir.count('\n')
postlines = 0
if dir.startswith('#'):
match = re.search((CHOMP_FLAGS + '$'), dir)
if match:
dir = match.group()
else:
dir = ''
else:
match = re.match(('(%s)?\\s*' % CHOMP_FLAGS), dir)
chomp = Chomp(((match and match.group(1)) or style['PRE_CHOMP']))
if match:
dir = dir[match.end():]
pre = PRE_CHOMP[chomp](pre)
match = re.search(('\\s*(%s)?\\s*$' % CHOMP_FLAGS), dir)
chomp = Chomp(((match and match.group(1)) or style['POST_CHOMP']))
if match:
dir = dir[:match.start()]
(text, postlines) = POST_CHOMP[chomp](text, postlines)
if pre:
if style['INTERPOLATE']:
tokens.append([pre, line, 'ITEXT'])
else:
tokens.extend(['TEXT', pre])
line += prelines
if dir:
match = re.match('(?i)TAGS\\s+(.*)', dir)
if match:
tags = re.split('\\s+', match.group(1))
if (len(tags) > 1):
splitter = make_splitter(tuple((re.escape(x) for x in tags[:2])))
elif (tags[0] in TAG_STYLE):
splitter = make_splitter(TAG_STYLE[tags[0]])
else:
sys.stderr.write(('Invalid TAGS style: %s' % tags[0]))
else:
if (dirlines > 0):
line_range = ('%d-%d' % (line, (line + dirlines)))
else:
line_range = str(line)
tokens.append([dir, line_range, self.tokenise_directive(dir)])
line += (dirlines + postlines)
if text:
if style['INTERPOLATE']:
tokens.append([text, line, 'ITEXT'])
else:
tokens.extend(['TEXT', text])
return tokens | 758,487,461,039,575,700 | Split input template text into directives and raw text chunks. | template/parser.py | split_text | lmr/Template-Toolkit-Python | python | def split_text(self, text):
tokens = []
line = 1
style = self.style[(- 1)]
def make_splitter(delims):
return re.compile(('(?s)(.*?)%s(.*?)%s' % delims))
splitter = make_splitter((style['START_TAG'], style['END_TAG']))
while True:
match = splitter.match(text)
if (not match):
break
text = text[match.end():]
(pre, dir) = (match.group(1), match.group(2))
prelines = pre.count('\n')
dirlines = dir.count('\n')
postlines = 0
if dir.startswith('#'):
match = re.search((CHOMP_FLAGS + '$'), dir)
if match:
dir = match.group()
else:
dir = ''
else:
match = re.match(('(%s)?\\s*' % CHOMP_FLAGS), dir)
chomp = Chomp(((match and match.group(1)) or style['PRE_CHOMP']))
if match:
dir = dir[match.end():]
pre = PRE_CHOMP[chomp](pre)
match = re.search(('\\s*(%s)?\\s*$' % CHOMP_FLAGS), dir)
chomp = Chomp(((match and match.group(1)) or style['POST_CHOMP']))
if match:
dir = dir[:match.start()]
(text, postlines) = POST_CHOMP[chomp](text, postlines)
if pre:
if style['INTERPOLATE']:
tokens.append([pre, line, 'ITEXT'])
else:
tokens.extend(['TEXT', pre])
line += prelines
if dir:
match = re.match('(?i)TAGS\\s+(.*)', dir)
if match:
tags = re.split('\\s+', match.group(1))
if (len(tags) > 1):
splitter = make_splitter(tuple((re.escape(x) for x in tags[:2])))
elif (tags[0] in TAG_STYLE):
splitter = make_splitter(TAG_STYLE[tags[0]])
else:
sys.stderr.write(('Invalid TAGS style: %s' % tags[0]))
else:
if (dirlines > 0):
line_range = ('%d-%d' % (line, (line + dirlines)))
else:
line_range = str(line)
tokens.append([dir, line_range, self.tokenise_directive(dir)])
line += (dirlines + postlines)
if text:
if style['INTERPOLATE']:
tokens.append([text, line, 'ITEXT'])
else:
tokens.extend(['TEXT', text])
return tokens |
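
The heart of `split_text` is the splitter regex built by `make_splitter`, which captures (preceding text, directive) pairs. A standalone demonstration, assuming Template Toolkit's default `[% ... %]` tag style:

```python
import re

START_TAG, END_TAG = r'\[%', r'%\]'  # default tag style (assumed)
splitter = re.compile('(?s)(.*?)%s(.*?)%s' % (START_TAG, END_TAG))

m = splitter.match('Hello [% name %]!')
print(repr(m.group(1)), repr(m.group(2)))  # 'Hello ' ' name '
```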
def _comment(self, token):
'Tokenizes a comment.'
return () | -2,133,685,928,393,501,700 | Tokenizes a comment. | template/parser.py | _comment | lmr/Template-Toolkit-Python | python | def _comment(self, token):
return () |
def _string(self, quote, token):
'Tokenizes a string.'
if (quote == '"'):
if re.search('[$\\\\]', token):
token = re.sub('\\\\([\\\\"])', '\\1', token)
token = re.sub('\\\\([^$nrt])', '\\1', token)
token = re.sub('\\\\([nrt])', (lambda m: ESCAPE[m.group(1)]), token)
return ((['"', '"'] + self.interpolate_text(token)) + ['"', '"'])
else:
return ('LITERAL', ('scalar(%r)' % token))
else:
token = re.sub('\\\\(.)', (lambda m: m.group((m.group(1) in "'\\"))), token)
return ('LITERAL', ('scalar(%r)' % token)) | 753,292,761,794,894,700 | Tokenizes a string. | template/parser.py | _string | lmr/Template-Toolkit-Python | python | def _string(self, quote, token):
if (quote == '"'):
if re.search('[$\\\\]', token):
token = re.sub('\\\\([\\\\"])', '\\1', token)
token = re.sub('\\\\([^$nrt])', '\\1', token)
token = re.sub('\\\\([nrt])', (lambda m: ESCAPE[m.group(1)]), token)
return ((['"', '"'] + self.interpolate_text(token)) + ['"', '"'])
else:
return ('LITERAL', ('scalar(%r)' % token))
else:
token = re.sub('\\\\(.)', (lambda m: m.group((m.group(1) in "'\\"))), token)
return ('LITERAL', ('scalar(%r)' % token)) |
def _number(self, token):
'Tokenizes a number.'
return ('NUMBER', ('scalar(%s)' % token)) | 4,641,070,584,772,736,000 | Tokenizes a number. | template/parser.py | _number | lmr/Template-Toolkit-Python | python | def _number(self, token):
return ('NUMBER', ('scalar(%s)' % token)) |
def _filename(self, token):
'Tokenizes a filename.'
return ('FILENAME', token) | -3,104,584,728,914,155,000 | Tokenizes a filename. | template/parser.py | _filename | lmr/Template-Toolkit-Python | python | def _filename(self, token):
return ('FILENAME', token) |
def _identifier(self, token):
'Tokenizes an identifier.'
if self.anycase:
uctoken = token.upper()
else:
uctoken = token
toktype = self.lextable.get(uctoken)
if (toktype is not None):
return (toktype, uctoken)
else:
return ('IDENT', token) | 990,242,196,787,017,100 | Tokenizes an identifier. | template/parser.py | _identifier | lmr/Template-Toolkit-Python | python | def _identifier(self, token):
if self.anycase:
uctoken = token.upper()
else:
uctoken = token
toktype = self.lextable.get(uctoken)
if (toktype is not None):
return (toktype, uctoken)
else:
return ('IDENT', token) |
def _word(self, token):
'Tokenizes an unquoted word or symbol.'
return (self.lextable.get(token, 'UNQUOTED'), token) | 6,220,761,228,759,049,000 | Tokenizes an unquoted word or symbol. | template/parser.py | _word | lmr/Template-Toolkit-Python | python | def _word(self, token):
return (self.lextable.get(token, 'UNQUOTED'), token) |
def tokenise_directive(self, dirtext):
'Called by the private _parse() method when it encounters a\n DIRECTIVE token in the list provided by the split_text() or\n interpolate_text() methods.\n\n The method splits the directive into individual tokens as\n recognised by the parser grammar (see template.grammar for\n details). It constructs a list of tokens each represented by 2\n elements, as per split_text() et al. The first element contains\n the token type, the second the token itself.\n\n The method tokenises the string using a complex (but fast) regex.\n For a deeper understanding of the regex magic at work here, see\n Jeffrey Friedl\'s excellent book "Mastering Regular Expressions",\n from O\'Reilly, ISBN 1-56592-257-3\n\n Returns the list of chunks (each one being 2 elements) identified\n in the directive text.\n '
tokens = []
for match in GRAMMAR.finditer(dirtext):
for (indices, method) in self.tokenize:
if match.group(indices[0]):
tokens.extend(method(*list(map(match.group, indices))))
break
return tokens | 3,093,818,789,588,920,300 | Called by the private _parse() method when it encounters a
DIRECTIVE token in the list provided by the split_text() or
interpolate_text() methods.
The method splits the directive into individual tokens as
recognised by the parser grammar (see template.grammar for
details). It constructs a list of tokens each represented by 2
elements, as per split_text() et al. The first element contains
the token type, the second the token itself.
The method tokenises the string using a complex (but fast) regex.
For a deeper understanding of the regex magic at work here, see
Jeffrey Friedl's excellent book "Mastering Regular Expressions",
from O'Reilly, ISBN 1-56592-257-3
Returns the list of chunks (each one being 2 elements) identified
in the directive text. | template/parser.py | tokenise_directive | lmr/Template-Toolkit-Python | python | def tokenise_directive(self, dirtext):
'Called by the private _parse() method when it encounters a\n DIRECTIVE token in the list provided by the split_text() or\n interpolate_text() methods.\n\n The method splits the directive into individual tokens as\n recognised by the parser grammar (see template.grammar for\n details). It constructs a list of tokens each represented by 2\n elements, as per split_text() et al. The first element contains\n the token type, the second the token itself.\n\n The method tokenises the string using a complex (but fast) regex.\n For a deeper understanding of the regex magic at work here, see\n Jeffrey Friedl\'s excellent book "Mastering Regular Expressions",\n from O\'Reilly, ISBN 1-56592-257-3\n\n Returns the list of chunks (each one being 2 elements) identified\n in the directive text.\n '
tokens = []
for match in GRAMMAR.finditer(dirtext):
for (indices, method) in self.tokenize:
if match.group(indices[0]):
tokens.extend(method(*list(map(match.group, indices))))
break
return tokens |
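For illustration, a hedged sketch of the flat chunk list this method produces (the token names such as ASSIGN come from the generated lextable and are assumptions here, not confirmed by this excerpt):
chunks = parser.tokenise_directive("foo = 'bar'")
# -> ['IDENT', 'foo', 'ASSIGN', '=', 'LITERAL', "scalar('bar')"]
# each recognised token contributes a (type, value) pair, flattened into one list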
def _parse(self, tokens, info):
'Parses the list of input tokens passed by reference and returns\n an object which contains the compiled representation of the\n template.\n\n This is the main parser DFA loop. See embedded comments for\n further details.\n '
self.grammar.install_factory(self.factory)
stack = [[0, None]]
coderet = None
token = None
in_string = False
in_python = False
status = CONTINUE
lhs = None
text = None
self.line = 0
self.file = (info and info.name)
self.inpython = 0
value = None
while True:
stateno = stack[(- 1)][0]
state = self.states[stateno]
if ('ACTIONS' in state):
while ((token is None) and tokens):
token = tokens.pop(0)
if isinstance(token, (list, tuple)):
(text, self.line, token) = util.unpack(token, 3)
if isinstance(token, (list, tuple)):
tokens[:0] = (token + [';', ';'])
token = None
elif (token == 'ITEXT'):
if in_python:
token = 'TEXT'
value = text
else:
tokens[:0] = self.interpolate_text(text, self.line)
token = None
else:
if (token == '"'):
in_string = (not in_string)
value = ((tokens and tokens.pop(0)) or None)
if (token is None):
token = ''
lookup = state['ACTIONS'].get(token)
if lookup:
action = lookup
else:
action = state.get('DEFAULT')
else:
action = state.get('DEFAULT')
if (action is None):
break
if (action > 0):
stack.append([action, value])
token = value = None
else:
(lhs, len_, code) = self.rules[(- action)]
if (not action):
status = ACCEPT
if (not code):
code = (lambda *arg: (((len(arg) >= 2) and arg[1]) or None))
if (len_ > 0):
codevars = [x[1] for x in stack[(- len_):]]
else:
codevars = []
try:
coderet = code(self, *codevars)
except TemplateException as e:
self._parse_error(str(e), info.name)
if (len_ > 0):
stack[(- len_):] = []
if (status == ACCEPT):
return coderet
elif (status == ABORT):
return None
elif (status == ERROR):
break
stack.append([self.states[stack[(- 1)][0]].get('GOTOS', {}).get(lhs), coderet])
if (value is None):
self._parse_error('unexpected end of input', info.name)
elif (value == ';'):
self._parse_error('unexpected end of directive', info.name, text)
else:
self._parse_error(('unexpected token (%s)' % util.unscalar_lex(value)), info.name, text) | 3,020,846,264,041,005,000 | Parses the list of input tokens passed by reference and returns
an object which contains the compiled representation of the
template.
This is the main parser DFA loop. See embedded comments for
further details. | template/parser.py | _parse | lmr/Template-Toolkit-Python | python | def _parse(self, tokens, info):
'Parses the list of input tokens passed by reference and returns\n an object which contains the compiled representation of the\n template.\n\n This is the main parser DFA loop. See embedded comments for\n further details.\n '
self.grammar.install_factory(self.factory)
stack = [[0, None]]
coderet = None
token = None
in_string = False
in_python = False
status = CONTINUE
lhs = None
text = None
self.line = 0
self.file = (info and info.name)
self.inpython = 0
value = None
while True:
stateno = stack[(- 1)][0]
state = self.states[stateno]
if ('ACTIONS' in state):
while ((token is None) and tokens):
token = tokens.pop(0)
if isinstance(token, (list, tuple)):
(text, self.line, token) = util.unpack(token, 3)
if isinstance(token, (list, tuple)):
tokens[:0] = (token + [';', ';'])
token = None
elif (token == 'ITEXT'):
if in_python:
token = 'TEXT'
value = text
else:
tokens[:0] = self.interpolate_text(text, self.line)
token = None
else:
if (token == '"'):
in_string = (not in_string)
value = ((tokens and tokens.pop(0)) or None)
if (token is None):
token = ''
lookup = state['ACTIONS'].get(token)
if lookup:
action = lookup
else:
action = state.get('DEFAULT')
else:
action = state.get('DEFAULT')
if (action is None):
break
if (action > 0):
stack.append([action, value])
token = value = None
else:
(lhs, len_, code) = self.rules[(- action)]
if (not action):
status = ACCEPT
if (not code):
code = (lambda *arg: (((len(arg) >= 2) and arg[1]) or None))
if (len_ > 0):
codevars = [x[1] for x in stack[(- len_):]]
else:
codevars = []
try:
coderet = code(self, *codevars)
except TemplateException as e:
self._parse_error(str(e), info.name)
if (len_ > 0):
stack[(- len_):] = []
if (status == ACCEPT):
return coderet
elif (status == ABORT):
return None
elif (status == ERROR):
break
stack.append([self.states[stack[(- 1)][0]].get('GOTOS', {}).get(lhs), coderet])
if (value is None):
self._parse_error('unexpected end of input', info.name)
elif (value == ';'):
self._parse_error('unexpected end of directive', info.name, text)
else:
self._parse_error(('unexpected token (%s)' % util.unscalar_lex(value)), info.name, text) |
def _parse_error(self, msg, name, text=None):
'Method used to handle errors encountered during the parse process\n in the _parse() method.\n '
line = (self.line or 'unknown')
if (text is not None):
msg += ('\n [%% %s %%]' % text)
raise TemplateException('parse', ('%s line %s: %s' % (name, line, msg))) | -1,244,724,088,228,085,500 | Method used to handle errors encountered during the parse process
in the _parse() method. | template/parser.py | _parse_error | lmr/Template-Toolkit-Python | python | def _parse_error(self, msg, name, text=None):
'Method used to handle errors encountered during the parse process\n in the _parse() method.\n '
line = (self.line or 'unknown')
if (text is not None):
msg += ('\n [%% %s %%]' % text)
raise TemplateException('parse', ('%s line %s: %s' % (name, line, msg))) |
def define_block(self, name, block):
'Called by the parser \'defblock\' rule when a BLOCK definition is\n encountered in the template.\n\n The name of the block is passed in the first parameter and a\n reference to the compiled block is passed in the second. This\n method stores the block in the self.defblock dictionary which has\n been initialised by parse() and will later be used by the same\n method to call the store() method on the calling cache to define\n the block "externally".\n '
if (self.defblock is None):
return None
self.defblock[name] = block
return None | -7,079,218,376,014,275,000 | Called by the parser 'defblock' rule when a BLOCK definition is
encountered in the template.
The name of the block is passed in the first parameter and a
reference to the compiled block is passed in the second. This
method stores the block in the self.defblock dictionary which has
been initialised by parse() and will later be used by the same
method to call the store() method on the calling cache to define
the block "externally". | template/parser.py | define_block | lmr/Template-Toolkit-Python | python | def define_block(self, name, block):
'Called by the parser \'defblock\' rule when a BLOCK definition is\n encountered in the template.\n\n The name of the block is passed in the first parameter and a\n reference to the compiled block is passed in the second. This\n method stores the block in the self.defblock dictionary which has\n been initialised by parse() and will later be used by the same\n method to call the store() method on the calling cache to define\n the block "externally".\n '
if (self.defblock is None):
return None
self.defblock[name] = block
return None |
def interpolate_text(self, text, line=0):
'Examines text looking for any variable references embedded\n like $this or like ${ this }.\n '
tokens = []
for match in QUOTED_STRING.finditer(text):
pre = match.group(1)
var = (match.group(3) or match.group(4))
dir = match.group(2)
if pre:
line += pre.count('\n')
tokens.extend(('TEXT', pre.replace('\\$', '$')))
if var:
line += dir.count('\n')
tokens.append([dir, line, self.tokenise_directive(var)])
elif dir:
line += dir.count('\n')
tokens.extend(('TEXT', dir))
return tokens | -3,927,113,451,080,370,000 | Examines text looking for any variable references embedded
like $this or like ${ this }. | template/parser.py | interpolate_text | lmr/Template-Toolkit-Python | python | def interpolate_text(self, text, line=0):
'Examines text looking for any variable references embedded\n like $this or like ${ this }.\n '
tokens = []
for match in QUOTED_STRING.finditer(text):
pre = match.group(1)
var = (match.group(3) or match.group(4))
dir = match.group(2)
if pre:
line += pre.count('\n')
tokens.extend(('TEXT', pre.replace('\\$', '$')))
if var:
line += dir.count('\n')
tokens.append([dir, line, self.tokenise_directive(var)])
elif dir:
line += dir.count('\n')
tokens.extend(('TEXT', dir))
return tokens |
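A hedged sketch of the resulting token stream (the nested directive tokens are elided; exact chunking depends on the QUOTED_STRING regex):
parser.interpolate_text('Hello $name!')
# -> roughly ['TEXT', 'Hello ', ['$name', 0, <tokenised directive>], 'TEXT', '!']
# plain chunks become flat ('TEXT', text) pairs, while each $var or ${ var }
# becomes a [raw_text, line, directive_tokens] triple for the parser to expand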
@classmethod
def setup_company_data(cls, company_name, chart_template, **kwargs):
' Create a new company having the name passed as parameter.\n A chart of accounts will be installed to this company: the same as the current company one.\n The current user will get access to this company.\n\n :param company_name: The name of the company.\n :return: A dictionary will be returned containing all relevant accounting data for testing.\n '
def search_account(company, chart_template, field_name, domain):
template_code = chart_template[field_name].code
domain = ([('company_id', '=', company.id)] + domain)
account = None
if template_code:
account = cls.env['account.account'].search((domain + [('code', '=like', (template_code + '%'))]), limit=1)
if (not account):
account = cls.env['account.account'].search(domain, limit=1)
return account
currency = chart_template.currency_id
company = cls.env['res.company'].create({'name': company_name, 'currency_id': currency.id, **kwargs})
cls.env.user.company_ids |= company
chart_template.try_loading(company=company)
company.write({'currency_id': kwargs.get('currency_id', currency.id)})
return {'company': company, 'currency': company.currency_id, 'default_account_revenue': cls.env['account.account'].search([('company_id', '=', company.id), ('user_type_id', '=', cls.env.ref('account.data_account_type_revenue').id)], limit=1), 'default_account_expense': cls.env['account.account'].search([('company_id', '=', company.id), ('user_type_id', '=', cls.env.ref('account.data_account_type_expenses').id)], limit=1), 'default_account_receivable': search_account(company, chart_template, 'property_account_receivable_id', [('user_type_id.type', '=', 'receivable')]), 'default_account_payable': cls.env['account.account'].search([('company_id', '=', company.id), ('user_type_id.type', '=', 'payable')], limit=1), 'default_account_assets': cls.env['account.account'].search([('company_id', '=', company.id), ('user_type_id', '=', cls.env.ref('account.data_account_type_current_assets').id)], limit=1), 'default_account_tax_sale': company.account_sale_tax_id.mapped('invoice_repartition_line_ids.account_id'), 'default_account_tax_purchase': company.account_purchase_tax_id.mapped('invoice_repartition_line_ids.account_id'), 'default_journal_misc': cls.env['account.journal'].search([('company_id', '=', company.id), ('type', '=', 'general')], limit=1), 'default_journal_sale': cls.env['account.journal'].search([('company_id', '=', company.id), ('type', '=', 'sale')], limit=1), 'default_journal_purchase': cls.env['account.journal'].search([('company_id', '=', company.id), ('type', '=', 'purchase')], limit=1), 'default_journal_bank': cls.env['account.journal'].search([('company_id', '=', company.id), ('type', '=', 'bank')], limit=1), 'default_journal_cash': cls.env['account.journal'].search([('company_id', '=', company.id), ('type', '=', 'cash')], limit=1), 'default_tax_sale': company.account_sale_tax_id, 'default_tax_purchase': company.account_purchase_tax_id} | -8,222,547,644,197,628,000 | Create a new company having the name passed as parameter.
A chart of accounts will be installed to this company: the same as the current company one.
The current user will get access to this company.
:param company_name: The name of the company.
:return: A dictionary will be returned containing all relevant accounting data for testing. | odoo/base-addons/account/tests/account_test_savepoint.py | setup_company_data | LucasBorges-Santos/docker-odoo | python | @classmethod
def setup_company_data(cls, company_name, chart_template, **kwargs):
' Create a new company having the name passed as parameter.\n A chart of accounts will be installed to this company: the same as the current company one.\n The current user will get access to this company.\n\n :param company_name: The name of the company.\n :return: A dictionary will be returned containing all relevant accounting data for testing.\n '
def search_account(company, chart_template, field_name, domain):
template_code = chart_template[field_name].code
domain = ([('company_id', '=', company.id)] + domain)
account = None
if template_code:
account = cls.env['account.account'].search((domain + [('code', '=like', (template_code + '%'))]), limit=1)
if (not account):
account = cls.env['account.account'].search(domain, limit=1)
return account
currency = chart_template.currency_id
company = cls.env['res.company'].create({'name': company_name, 'currency_id': currency.id, **kwargs})
cls.env.user.company_ids |= company
chart_template.try_loading(company=company)
company.write({'currency_id': kwargs.get('currency_id', currency.id)})
return {'company': company, 'currency': company.currency_id, 'default_account_revenue': cls.env['account.account'].search([('company_id', '=', company.id), ('user_type_id', '=', cls.env.ref('account.data_account_type_revenue').id)], limit=1), 'default_account_expense': cls.env['account.account'].search([('company_id', '=', company.id), ('user_type_id', '=', cls.env.ref('account.data_account_type_expenses').id)], limit=1), 'default_account_receivable': search_account(company, chart_template, 'property_account_receivable_id', [('user_type_id.type', '=', 'receivable')]), 'default_account_payable': cls.env['account.account'].search([('company_id', '=', company.id), ('user_type_id.type', '=', 'payable')], limit=1), 'default_account_assets': cls.env['account.account'].search([('company_id', '=', company.id), ('user_type_id', '=', cls.env.ref('account.data_account_type_current_assets').id)], limit=1), 'default_account_tax_sale': company.account_sale_tax_id.mapped('invoice_repartition_line_ids.account_id'), 'default_account_tax_purchase': company.account_purchase_tax_id.mapped('invoice_repartition_line_ids.account_id'), 'default_journal_misc': cls.env['account.journal'].search([('company_id', '=', company.id), ('type', '=', 'general')], limit=1), 'default_journal_sale': cls.env['account.journal'].search([('company_id', '=', company.id), ('type', '=', 'sale')], limit=1), 'default_journal_purchase': cls.env['account.journal'].search([('company_id', '=', company.id), ('type', '=', 'purchase')], limit=1), 'default_journal_bank': cls.env['account.journal'].search([('company_id', '=', company.id), ('type', '=', 'bank')], limit=1), 'default_journal_cash': cls.env['account.journal'].search([('company_id', '=', company.id), ('type', '=', 'cash')], limit=1), 'default_tax_sale': company.account_sale_tax_id, 'default_tax_purchase': company.account_purchase_tax_id} |
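A hedged usage sketch from a test class; the chart-template xmlid below is illustrative and differs per localization:
@classmethod
def setUpClass(cls):
    super().setUpClass()
    chart_template = cls.env.ref('l10n_generic_coa.configurable_chart_template')  # assumed xmlid
    cls.company_data = cls.setup_company_data('company_1_data', chart_template)
    cls.sale_journal = cls.company_data['default_journal_sale']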
@contextmanager
def mocked_today(self, forced_today):
' Helper to easily make a python "with statement" mocking the "today" date.\n :param forced_today: The expected "today" date as a str or Date object.\n :return: An object to be used like \'with self.mocked_today(<today>):\'.\n '
if isinstance(forced_today, str):
forced_today_date = fields.Date.from_string(forced_today)
forced_today_datetime = fields.Datetime.from_string(forced_today)
elif isinstance(forced_today, datetime.datetime):
forced_today_datetime = forced_today
forced_today_date = forced_today_datetime.date()
else:
forced_today_date = forced_today
forced_today_datetime = datetime.datetime.combine(forced_today_date, datetime.time())
def today(*args, **kwargs):
return forced_today_date
with patch.object(fields.Date, 'today', today):
with patch.object(fields.Date, 'context_today', today):
with patch.object(fields.Datetime, 'now', return_value=forced_today_datetime):
(yield) | -6,320,081,236,697,501,000 | Helper to easily make a python "with statement" mocking the "today" date.
:param forced_today: The expected "today" date as a str or Date object.
:return: An object to be used like 'with self.mocked_today(<today>):'. | odoo/base-addons/account/tests/account_test_savepoint.py | mocked_today | LucasBorges-Santos/docker-odoo | python | @contextmanager
def mocked_today(self, forced_today):
' Helper to easily make a python "with statement" mocking the "today" date.\n :param forced_today: The expected "today" date as a str or Date object.\n :return: An object to be used like \'with self.mocked_today(<today>):\'.\n '
if isinstance(forced_today, str):
forced_today_date = fields.Date.from_string(forced_today)
forced_today_datetime = fields.Datetime.from_string(forced_today)
elif isinstance(forced_today, datetime.datetime):
forced_today_datetime = forced_today
forced_today_date = forced_today_datetime.date()
else:
forced_today_date = forced_today
forced_today_datetime = datetime.datetime.combine(forced_today_date, datetime.time())
def today(*args, **kwargs):
return forced_today_date
with patch.object(fields.Date, 'today', today):
with patch.object(fields.Date, 'context_today', today):
with patch.object(fields.Datetime, 'now', return_value=forced_today_datetime):
(yield) |
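For example, inside a test method (from_string is the same helper the implementation itself uses):
with self.mocked_today('2019-01-01'):
    assert fields.Date.today() == fields.Date.from_string('2019-01-01')
    assert fields.Datetime.now() == fields.Datetime.from_string('2019-01-01')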
def build_model(self):
'\n Build the custom CNN for the CIFAR-10 dataset.\n '
self.X = tf.compat.v1.placeholder(tf.float32, shape=(None, self.config.data['image_size'], self.config.data['image_size'], self.config.data['num_channels']), name='X')
self.y = tf.compat.v1.placeholder(tf.int32, shape=(None, self.config.data['num_categories']), name='y')
self.train = tf.compat.v1.placeholder(tf.bool)
with tf.name_scope('cnn'):
self.conv1 = tf.layers.conv2d(self.X, self.config.cifar10_cnn['num_filters'], self.config.cifar10_cnn['filter_size'], padding='same', activation=tf.nn.relu)
self.drop1 = tf.layers.dropout(self.conv1, self.config.cifar10_cnn['keep_prob'], training=self.train)
self.pool1 = tf.layers.max_pooling2d(self.drop1, 2, 2)
self.conv2 = tf.layers.conv2d(self.pool1, self.config.cifar10_cnn['num_filters'], self.config.cifar10_cnn['filter_size'], padding='same', activation=tf.nn.relu)
self.drop2 = tf.layers.dropout(self.conv2, self.config.cifar10_cnn['keep_prob'], training=self.train)
self.pool2 = tf.layers.max_pooling2d(self.drop2, 2, 2)
self.conv3 = tf.layers.conv2d(self.pool2, self.config.cifar10_cnn['num_filters'], self.config.cifar10_cnn['filter_size'], padding='same', activation=tf.nn.relu)
self.pool3 = tf.layers.max_pooling2d(self.conv3, 2, 2)
self.conv4 = tf.layers.conv2d(self.pool3, self.config.cifar10_cnn['num_filters'], self.config.cifar10_cnn['filter_size'], padding='same', activation=tf.nn.relu)
self.drop3 = tf.layers.dropout(self.conv4, self.config.cifar10_cnn['keep_prob'], training=self.train)
self.flatten = tf.reshape(self.drop3, [(- 1), self.config.cifar10_cnn['fc1_nb_units']])
with tf.contrib.framework.arg_scope([tf.contrib.layers.fully_connected], normalizer_fn=tf.contrib.layers.batch_norm, normalizer_params={'is_training': self.train}):
self.fc1 = tf.contrib.layers.fully_connected(self.flatten, self.config.cifar10_cnn['fc1_nb_units'])
self.fc2 = tf.contrib.layers.fully_connected(self.fc1, self.config.data['num_categories'], activation_fn=None)
with tf.name_scope('loss'):
self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.fc2, labels=self.y))
with tf.name_scope('training_op'):
self.training_op = tf.compat.v1.train.AdamOptimizer(self.learning_rate).minimize(self.loss)
with tf.name_scope('accuracy'):
prediction = tf.equal(tf.argmax(self.fc2, 1), tf.argmax(self.y, 1))
self.accuracy = tf.reduce_mean(tf.cast(prediction, tf.float32)) | 7,480,506,121,187,723,000 | Build the custom CNN for the CIFAR-10 dataset. | rklearn/tests/it/cifar10_cnn.py | build_model | rejux/rklearn-lib | python | def build_model(self):
'\n \n '
self.X = tf.compat.v1.placeholder(tf.float32, shape=(None, self.config.data['image_size'], self.config.data['image_size'], self.config.data['num_channels']), name='X')
self.y = tf.compat.v1.placeholder(tf.int32, shape=(None, self.config.data['num_categories']), name='y')
self.train = tf.compat.v1.placeholder(tf.bool)
with tf.name_scope('cnn'):
self.conv1 = tf.layers.conv2d(self.X, self.config.cifar10_cnn['num_filters'], self.config.cifar10_cnn['filter_size'], padding='same', activation=tf.nn.relu)
self.drop1 = tf.layers.dropout(self.conv1, self.config.cifar10_cnn['keep_prob'], training=self.train)
self.pool1 = tf.layers.max_pooling2d(self.drop1, 2, 2)
self.conv2 = tf.layers.conv2d(self.pool1, self.config.cifar10_cnn['num_filters'], self.config.cifar10_cnn['filter_size'], padding='same', activation=tf.nn.relu)
self.drop2 = tf.layers.dropout(self.conv2, self.config.cifar10_cnn['keep_prob'], training=self.train)
self.pool2 = tf.layers.max_pooling2d(self.drop2, 2, 2)
self.conv3 = tf.layers.conv2d(self.pool2, self.config.cifar10_cnn['num_filters'], self.config.cifar10_cnn['filter_size'], padding='same', activation=tf.nn.relu)
self.pool3 = tf.layers.max_pooling2d(self.conv3, 2, 2)
self.conv4 = tf.layers.conv2d(self.pool3, self.config.cifar10_cnn['num_filters'], self.config.cifar10_cnn['filter_size'], padding='same', activation=tf.nn.relu)
self.drop3 = tf.layers.dropout(self.conv4, self.config.cifar10_cnn['keep_prob'], training=self.train)
self.flatten = tf.reshape(self.drop3, [(- 1), self.config.cifar10_cnn['fc1_nb_units']])
with tf.contrib.framework.arg_scope([tf.contrib.layers.fully_connected], normalizer_fn=tf.contrib.layers.batch_norm, normalizer_params={'is_training': self.train}):
self.fc1 = tf.contrib.layers.fully_connected(self.flatten, self.config.cifar10_cnn['fc1_nb_units'])
self.fc2 = tf.contrib.layers.fully_connected(self.fc1, self.config.data['num_categories'], activation_fn=None)
with tf.name_scope('loss'):
self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.fc2, labels=self.y))
with tf.name_scope('training_op'):
self.training_op = tf.compat.v1.train.AdamOptimizer(self.learning_rate).minimize(self.loss)
with tf.name_scope('accuracy'):
prediction = tf.equal(tf.argmax(self.fc2, 1), tf.argmax(self.y, 1))
self.accuracy = tf.reduce_mean(tf.cast(prediction, tf.float32)) |
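The graph pulls its hyperparameters from self.config; a hypothetical configuration consistent with the keys used above could be:
config.data = {'image_size': 32, 'num_channels': 3, 'num_categories': 10}
config.cifar10_cnn = {'num_filters': 32, 'filter_size': 3, 'keep_prob': 0.5, 'fc1_nb_units': 512}
# the reshape to [-1, fc1_nb_units] only works if fc1_nb_units equals the flattened
# feature-map size: with a 32x32 input and three 2x2 poolings this is 4 * 4 * num_filters = 512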
def reconstruct_wav(wavmat, stride_factor=0.5):
'\n Reconstructs the audio file from the sliced matrix wavmat\n '
window_length = wavmat.shape[1]
window_stride = int((stride_factor * window_length))
wav_length = (((wavmat.shape[0] - 1) * window_stride) + window_length)
wav_recon = np.zeros((1, wav_length))
for k in range(wavmat.shape[0]):
wav_beg = (k * window_stride)
wav_end = (wav_beg + window_length)
wav_recon[0, wav_beg:wav_end] += wavmat[k, :]
noverlap = int(np.ceil((1 / stride_factor)))
scale_ = ((1 / float(noverlap)) * np.ones((1, wav_length)))
for s in range((noverlap - 1)):
s_beg = (s * window_stride)
s_end = (s_beg + window_stride)
scale_[0, s_beg:s_end] = (1 / (s + 1))
scale_[0, ((- s_beg) - 1):(- s_end):(- 1)] = (1 / (s + 1))
return (wav_recon * scale_) | 8,180,927,570,105,799,000 | Reconstructs the audio file from the sliced matrix wavmat | data_ops.py | reconstruct_wav | deepakbaby/isegan | python | def reconstruct_wav(wavmat, stride_factor=0.5):
'\n \n '
window_length = wavmat.shape[1]
window_stride = int((stride_factor * window_length))
wav_length = (((wavmat.shape[0] - 1) * window_stride) + window_length)
wav_recon = np.zeros((1, wav_length))
for k in range(wavmat.shape[0]):
wav_beg = (k * window_stride)
wav_end = (wav_beg + window_length)
wav_recon[0, wav_beg:wav_end] += wavmat[k, :]
noverlap = int(np.ceil((1 / stride_factor)))
scale_ = ((1 / float(noverlap)) * np.ones((1, wav_length)))
for s in range((noverlap - 1)):
s_beg = (s * window_stride)
s_end = (s_beg + window_stride)
scale_[0, s_beg:s_end] = (1 / (s + 1))
scale_[0, ((- s_beg) - 1):(- s_end):(- 1)] = (1 / (s + 1))
return (wav_recon * scale_) |
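A small worked example of the overlap-add logic, assuming the default stride_factor=0.5:
import numpy as np
wavmat = np.vstack([np.arange(4.0)] * 3)   # 3 windows of length 4
# window_stride = 2, so wav_length = (3 - 1) * 2 + 4 = 8
wav = reconstruct_wav(wavmat)              # shape (1, 8)
# interior samples, where two windows overlap, keep the base scale of 1/2,
# while the leading window_stride samples are rescaled to 1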
def pre_emph(x, coeff=0.95):
'\n Apply pre_emph on 2d data (batch_size x window_length)\n '
x0 = x[:, 0]
x0 = np.expand_dims(x0, axis=1)
diff = (x[:, 1:] - (coeff * x[:, :(- 1)]))
x_preemph = np.concatenate((x0, diff), axis=1)
if (not (x.shape == x_preemph.shape)):
print('ERROR: Pre-emphasis is wrong')
return x_preemph | -3,590,206,272,014,839,300 | Apply pre_emph on 2d data (batch_size x window_length) | data_ops.py | pre_emph | deepakbaby/isegan | python | def pre_emph(x, coeff=0.95):
'\n \n '
x0 = x[:, 0]
x0 = np.expand_dims(x0, axis=1)
diff = (x[:, 1:] - (coeff * x[:, :(- 1)]))
x_preemph = np.concatenate((x0, diff), axis=1)
if (not (x.shape == x_preemph.shape)):
print('ERROR: Pre-emphasis is wrong')
return x_preemph |
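The transform is the first-difference filter y[n] = x[n] - coeff * x[n-1], applied per row with the first sample passed through unchanged. For example:
import numpy as np
x = np.array([[1.0, 1.0, 1.0]])
pre_emph(x)   # -> [[1.0, 0.05, 0.05]] with the default coeff=0.95 (up to float rounding)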
def de_emph(y, coeff=0.95):
'\n Apply de_emphasis on test data: works only on 1d data\n '
if (coeff <= 0):
return y
x = np.zeros((y.shape[0],), dtype=np.float32)
x[0] = y[0]
for n in range(1, y.shape[0], 1):
x[n] = ((coeff * x[(n - 1)]) + y[n])
return x | 3,612,364,311,839,633,400 | Apply de_emphasis on test data: works only on 1d data | data_ops.py | de_emph | deepakbaby/isegan | python | def de_emph(y, coeff=0.95):
'\n \n '
if (coeff <= 0):
return y
x = np.zeros((y.shape[0],), dtype=np.float32)
x[0] = y[0]
for n in range(1, y.shape[0], 1):
x[n] = ((coeff * x[(n - 1)]) + y[n])
return x |
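de_emph inverts that recursion via x[n] = coeff * x[n-1] + y[n], so a pre-emphasized row round-trips back to the original signal (up to float32 rounding):
import numpy as np
y = pre_emph(np.array([[1.0, 2.0, 3.0]]))[0]   # -> [1.0, 1.05, 1.1]
de_emph(y)                                     # -> approximately [1.0, 2.0, 3.0]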
@requires_application()
@pytest.mark.parametrize('is_3d', [True, False])
def test_image(is_3d):
'Test image visual'
size = (100, 50)
with TestingCanvas(size=size, bgcolor='w') as c:
image = Image(cmap='grays', clim=[0, 1], parent=c.scene)
shape = (((size[1] - 10), (size[0] - 10)) + ((3,) if is_3d else ()))
np.random.seed(379823)
data = np.random.rand(*shape)
image.set_data(data)
assert_image_approved(c.render(), ('visuals/image%s.png' % ('_rgb' if is_3d else '_mono'))) | -1,504,954,905,871,808,300 | Test image visual | vispy/visuals/tests/test_image.py | test_image | 3DAlgoLab/vispy | python | @requires_application()
@pytest.mark.parametrize('is_3d', [True, False])
def test_image(is_3d):
size = (100, 50)
with TestingCanvas(size=size, bgcolor='w') as c:
image = Image(cmap='grays', clim=[0, 1], parent=c.scene)
shape = (((size[1] - 10), (size[0] - 10)) + ((3,) if is_3d else ()))
np.random.seed(379823)
data = np.random.rand(*shape)
image.set_data(data)
assert_image_approved(c.render(), ('visuals/image%s.png' % ('_rgb' if is_3d else '_mono'))) |
@requires_application()
@pytest.mark.parametrize('data_on_init', [False, True])
@pytest.mark.parametrize('clim_on_init', [False, True])
@pytest.mark.parametrize('num_channels', [0, 1, 3, 4])
@pytest.mark.parametrize('texture_format', [None, '__dtype__', 'auto'])
@pytest.mark.parametrize('input_dtype', [np.uint8, np.uint16, np.float32, np.float64])
def test_image_clims_and_gamma(input_dtype, texture_format, num_channels, clim_on_init, data_on_init):
'Test image visual with clims and gamma on shader.'
size = (40, 40)
if (texture_format == '__dtype__'):
texture_format = input_dtype
shape = ((size + (num_channels,)) if (num_channels > 0) else size)
np.random.seed(0)
data = _make_test_data(shape, input_dtype)
(orig_clim, new_clim) = _get_orig_and_new_clims(input_dtype)
is_16int_cpu_scaled = ((np.dtype(input_dtype).itemsize >= 2) and np.issubdtype(input_dtype, np.integer) and (texture_format is None))
clim_atol = (2 if is_16int_cpu_scaled else 1)
gamma_atol = (3 if is_16int_cpu_scaled else 2)
kwargs = {}
if clim_on_init:
kwargs['clim'] = orig_clim
if data_on_init:
kwargs['data'] = data
set_data_fails = ((num_channels != 4) and (texture_format is not None) and (texture_format != 'auto'))
with TestingCanvas(size=size[::(- 1)], bgcolor='w') as c:
image = Image(cmap='grays', texture_format=texture_format, parent=c.scene, **kwargs)
if (not data_on_init):
_set_image_data(image, data, set_data_fails)
if set_data_fails:
return
rendered = c.render()
_dtype = rendered.dtype
shape_ratio = (rendered.shape[0] // data.shape[0])
rendered1 = downsample(rendered, shape_ratio, axis=(0, 1)).astype(_dtype)
_compare_render(data, rendered1)
image.clim = new_clim
rendered2 = downsample(c.render(), shape_ratio, axis=(0, 1)).astype(_dtype)
scaled_data = ((np.clip(data, new_clim[0], new_clim[1]) - new_clim[0]) / (new_clim[1] - new_clim[0]))
_compare_render(scaled_data, rendered2, rendered1, atol=clim_atol)
image.gamma = 2
rendered3 = downsample(c.render(), shape_ratio, axis=(0, 1)).astype(_dtype)
_compare_render((scaled_data ** 2), rendered3, rendered2, atol=gamma_atol) | 2,548,137,327,494,589,400 | Test image visual with clims and gamma on shader. | vispy/visuals/tests/test_image.py | test_image_clims_and_gamma | 3DAlgoLab/vispy | python | @requires_application()
@pytest.mark.parametrize('data_on_init', [False, True])
@pytest.mark.parametrize('clim_on_init', [False, True])
@pytest.mark.parametrize('num_channels', [0, 1, 3, 4])
@pytest.mark.parametrize('texture_format', [None, '__dtype__', 'auto'])
@pytest.mark.parametrize('input_dtype', [np.uint8, np.uint16, np.float32, np.float64])
def test_image_clims_and_gamma(input_dtype, texture_format, num_channels, clim_on_init, data_on_init):
size = (40, 40)
if (texture_format == '__dtype__'):
texture_format = input_dtype
shape = ((size + (num_channels,)) if (num_channels > 0) else size)
np.random.seed(0)
data = _make_test_data(shape, input_dtype)
(orig_clim, new_clim) = _get_orig_and_new_clims(input_dtype)
is_16int_cpu_scaled = ((np.dtype(input_dtype).itemsize >= 2) and np.issubdtype(input_dtype, np.integer) and (texture_format is None))
clim_atol = (2 if is_16int_cpu_scaled else 1)
gamma_atol = (3 if is_16int_cpu_scaled else 2)
kwargs = {}
if clim_on_init:
kwargs['clim'] = orig_clim
if data_on_init:
kwargs['data'] = data
set_data_fails = ((num_channels != 4) and (texture_format is not None) and (texture_format != 'auto'))
with TestingCanvas(size=size[::(- 1)], bgcolor='w') as c:
image = Image(cmap='grays', texture_format=texture_format, parent=c.scene, **kwargs)
if (not data_on_init):
_set_image_data(image, data, set_data_fails)
if set_data_fails:
return
rendered = c.render()
_dtype = rendered.dtype
shape_ratio = (rendered.shape[0] // data.shape[0])
rendered1 = downsample(rendered, shape_ratio, axis=(0, 1)).astype(_dtype)
_compare_render(data, rendered1)
image.clim = new_clim
rendered2 = downsample(c.render(), shape_ratio, axis=(0, 1)).astype(_dtype)
scaled_data = ((np.clip(data, new_clim[0], new_clim[1]) - new_clim[0]) / (new_clim[1] - new_clim[0]))
_compare_render(scaled_data, rendered2, rendered1, atol=clim_atol)
image.gamma = 2
rendered3 = downsample(c.render(), shape_ratio, axis=(0, 1)).astype(_dtype)
_compare_render((scaled_data ** 2), rendered3, rendered2, atol=gamma_atol) |
@requires_application()
def test_image_vertex_updates():
'Test image visual coordinates are only built when needed.'
size = (40, 40)
with TestingCanvas(size=size, bgcolor='w') as c:
shape = (size + (3,))
np.random.seed(0)
image = Image(cmap='grays', clim=[0, 1], parent=c.scene)
with mock.patch.object(image, '_build_vertex_data', wraps=image._build_vertex_data) as build_vertex_mock:
data = np.random.rand(*shape)
image.set_data(data)
c.render()
build_vertex_mock.assert_called_once()
build_vertex_mock.reset_mock()
c.render()
build_vertex_mock.assert_not_called()
data = np.zeros_like(data)
image.set_data(data)
c.render()
build_vertex_mock.assert_not_called()
data = data[:(- 5), :(- 5)]
image.set_data(data)
c.render()
build_vertex_mock.assert_called_once() | -4,843,310,889,639,677,000 | Test image visual coordinates are only built when needed. | vispy/visuals/tests/test_image.py | test_image_vertex_updates | 3DAlgoLab/vispy | python | @requires_application()
def test_image_vertex_updates():
size = (40, 40)
with TestingCanvas(size=size, bgcolor='w') as c:
shape = (size + (3,))
np.random.seed(0)
image = Image(cmap='grays', clim=[0, 1], parent=c.scene)
with mock.patch.object(image, '_build_vertex_data', wraps=image._build_vertex_data) as build_vertex_mock:
data = np.random.rand(*shape)
image.set_data(data)
c.render()
build_vertex_mock.assert_called_once()
build_vertex_mock.reset_mock()
c.render()
build_vertex_mock.assert_not_called()
data = np.zeros_like(data)
image.set_data(data)
c.render()
build_vertex_mock.assert_not_called()
data = data[:(- 5), :(- 5)]
image.set_data(data)
c.render()
build_vertex_mock.assert_called_once() |
@property
def buy_holding_pnl(self):
'\n [float] buy-side holding PnL for the current day\n '
return (((self.last_price - self.buy_avg_holding_price) * self.buy_quantity) * self.contract_multiplier) | 5,013,742,136,807,236,000 | [float] buy-side holding PnL for the current day | rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py | buy_holding_pnl | HackReborn/rqalpha | python | @property
def buy_holding_pnl(self):
'\n \n '
return (((self.last_price - self.buy_avg_holding_price) * self.buy_quantity) * self.contract_multiplier) |
@property
def sell_holding_pnl(self):
'\n [float] sell-side holding PnL for the current day\n '
return (((self.sell_avg_holding_price - self.last_price) * self.sell_quantity) * self.contract_multiplier) | 4,951,330,317,483,833,000 | [float] sell-side holding PnL for the current day | rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py | sell_holding_pnl | HackReborn/rqalpha | python | @property
def sell_holding_pnl(self):
'\n \n '
return (((self.sell_avg_holding_price - self.last_price) * self.sell_quantity) * self.contract_multiplier) |
@property
def buy_realized_pnl(self):
'\n [float] buy-side realized PnL\n '
return self._buy_realized_pnl | -5,964,010,901,328,179,000 | [float] buy-side realized PnL | rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py | buy_realized_pnl | HackReborn/rqalpha | python | @property
def buy_realized_pnl(self):
'\n \n '
return self._buy_realized_pnl |
@property
def sell_realized_pnl(self):
'\n [float] sell-side realized PnL\n '
return self._sell_realized_pnl | -6,855,069,120,863,464,000 | [float] sell-side realized PnL | rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py | sell_realized_pnl | HackReborn/rqalpha | python | @property
def sell_realized_pnl(self):
'\n \n '
return self._sell_realized_pnl |
@property
def holding_pnl(self):
'\n [float] holding PnL for the current day\n '
return (self.buy_holding_pnl + self.sell_holding_pnl) | 4,053,345,582,904,855,600 | [float] holding PnL for the current day | rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py | holding_pnl | HackReborn/rqalpha | python | @property
def holding_pnl(self):
'\n \n '
return (self.buy_holding_pnl + self.sell_holding_pnl) |
@property
def realized_pnl(self):
'\n [float] realized PnL for the current day\n '
return (self.buy_realized_pnl + self.sell_realized_pnl) | 7,618,758,669,661,730,000 | [float] realized PnL for the current day | rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py | realized_pnl | HackReborn/rqalpha | python | @property
def realized_pnl(self):
'\n \n '
return (self.buy_realized_pnl + self.sell_realized_pnl) |
@property
def buy_daily_pnl(self):
'\n [float] buy-side PnL for the current day\n '
return (self.buy_holding_pnl + self.buy_realized_pnl) | -9,029,934,724,672,357,000 | [float] buy-side PnL for the current day | rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py | buy_daily_pnl | HackReborn/rqalpha | python | @property
def buy_daily_pnl(self):
'\n \n '
return (self.buy_holding_pnl + self.buy_realized_pnl) |
@property
def sell_daily_pnl(self):
'\n [float] sell-side PnL for the current day\n '
return (self.sell_holding_pnl + self.sell_realized_pnl) | 8,487,000,099,801,001,000 | [float] sell-side PnL for the current day | rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py | sell_daily_pnl | HackReborn/rqalpha | python | @property
def sell_daily_pnl(self):
'\n \n '
return (self.sell_holding_pnl + self.sell_realized_pnl) |
@property
def daily_pnl(self):
'\n [float] total PnL for the current day\n '
return (self.holding_pnl + self.realized_pnl) | 6,437,207,131,527,689,000 | [float] total PnL for the current day | rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py | daily_pnl | HackReborn/rqalpha | python | @property
def daily_pnl(self):
'\n \n '
return (self.holding_pnl + self.realized_pnl) |
@property
def buy_pnl(self):
'\n [float] cumulative buy-side PnL\n '
return (((self.last_price - self._buy_avg_open_price) * self.buy_quantity) * self.contract_multiplier) | 1,141,625,177,052,609,700 | [float] cumulative buy-side PnL | rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py | buy_pnl | HackReborn/rqalpha | python | @property
def buy_pnl(self):
'\n \n '
return (((self.last_price - self._buy_avg_open_price) * self.buy_quantity) * self.contract_multiplier) |
@property
def sell_pnl(self):
'\n [float] cumulative sell-side PnL\n '
return (((self._sell_avg_open_price - self.last_price) * self.sell_quantity) * self.contract_multiplier) | 634,080,540,243,408,300 | [float] cumulative sell-side PnL | rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py | sell_pnl | HackReborn/rqalpha | python | @property
def sell_pnl(self):
'\n \n '
return (((self._sell_avg_open_price - self.last_price) * self.sell_quantity) * self.contract_multiplier) |
@property
def pnl(self):
'\n [float] cumulative PnL\n '
return (self.buy_pnl + self.sell_pnl) | 7,390,324,914,111,732,000 | [float] cumulative PnL | rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py | pnl | HackReborn/rqalpha | python | @property
def pnl(self):
'\n \n '
return (self.buy_pnl + self.sell_pnl) |
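A worked example of the cumulative PnL arithmetic, with hypothetical values: for last_price = 105, _buy_avg_open_price = 100, buy_quantity = 2 and contract_multiplier = 10 with no sell-side position, buy_pnl = (105 - 100) * 2 * 10 = 1000, sell_pnl = 0, and pnl = 1000.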
@property
def buy_open_order_quantity(self):
'\n [int] buy-side pending open-order quantity\n '
return sum((order.unfilled_quantity for order in self.open_orders if ((order.side == SIDE.BUY) and (order.position_effect == POSITION_EFFECT.OPEN)))) | 5,819,798,251,725,721,000 | [int] buy-side pending open-order quantity | rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py | buy_open_order_quantity | HackReborn/rqalpha | python | @property
def buy_open_order_quantity(self):
'\n \n '
return sum((order.unfilled_quantity for order in self.open_orders if ((order.side == SIDE.BUY) and (order.position_effect == POSITION_EFFECT.OPEN)))) |
@property
def sell_open_order_quantity(self):
'\n [int] sell-side pending open-order quantity\n '
return sum((order.unfilled_quantity for order in self.open_orders if ((order.side == SIDE.SELL) and (order.position_effect == POSITION_EFFECT.OPEN)))) | 7,449,379,844,507,132,000 | [int] sell-side pending open-order quantity | rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py | sell_open_order_quantity | HackReborn/rqalpha | python | @property
def sell_open_order_quantity(self):
'\n \n '
return sum((order.unfilled_quantity for order in self.open_orders if ((order.side == SIDE.SELL) and (order.position_effect == POSITION_EFFECT.OPEN)))) |
@property
def buy_close_order_quantity(self):
'\n [int] buy-side pending close-order quantity\n '
return sum((order.unfilled_quantity for order in self.open_orders if ((order.side == SIDE.BUY) and (order.position_effect in [POSITION_EFFECT.CLOSE, POSITION_EFFECT.CLOSE_TODAY])))) | -1,092,982,336,136,177,500 | [int] buy-side pending close-order quantity | rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py | buy_close_order_quantity | HackReborn/rqalpha | python | @property
def buy_close_order_quantity(self):
'\n \n '
return sum((order.unfilled_quantity for order in self.open_orders if ((order.side == SIDE.BUY) and (order.position_effect in [POSITION_EFFECT.CLOSE, POSITION_EFFECT.CLOSE_TODAY])))) |
@property
def sell_close_order_quantity(self):
'\n [int] sell-side pending close-order quantity\n '
return sum((order.unfilled_quantity for order in self.open_orders if ((order.side == SIDE.SELL) and (order.position_effect in [POSITION_EFFECT.CLOSE, POSITION_EFFECT.CLOSE_TODAY])))) | -6,132,563,355,507,018,000 | [int] sell-side pending close-order quantity | rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py | sell_close_order_quantity | HackReborn/rqalpha | python | @property
def sell_close_order_quantity(self):
'\n \n '
return sum((order.unfilled_quantity for order in self.open_orders if ((order.side == SIDE.SELL) and (order.position_effect in [POSITION_EFFECT.CLOSE, POSITION_EFFECT.CLOSE_TODAY])))) |
@property
def buy_old_quantity(self):
'\n [int] buy-side overnight (yesterday) position\n '
return sum((amount for (price, amount) in self._buy_old_holding_list)) | -7,388,740,906,032,962,000 | [int] buy-side overnight (yesterday) position | rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py | buy_old_quantity | HackReborn/rqalpha | python | @property
def buy_old_quantity(self):
'\n \n '
return sum((amount for (price, amount) in self._buy_old_holding_list)) |
@property
def sell_old_quantity(self):
'\n [int] sell-side overnight (yesterday) position\n '
return sum((amount for (price, amount) in self._sell_old_holding_list)) | -1,031,299,605,695,580,900 | [int] sell-side overnight (yesterday) position | rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py | sell_old_quantity | HackReborn/rqalpha | python | @property
def sell_old_quantity(self):
'\n \n '
return sum((amount for (price, amount) in self._sell_old_holding_list)) |
@property
def buy_today_quantity(self):
'\n [int] buy-side position opened today\n '
return sum((amount for (price, amount) in self._buy_today_holding_list)) | 363,529,546,698,532,200 | [int] buy-side position opened today | rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py | buy_today_quantity | HackReborn/rqalpha | python | @property
def buy_today_quantity(self):
'\n \n '
return sum((amount for (price, amount) in self._buy_today_holding_list)) |
@property
def sell_today_quantity(self):
'\n [int] sell-side position opened today\n '
return sum((amount for (price, amount) in self._sell_today_holding_list)) | 8,170,260,474,280,551,000 | [int] sell-side position opened today | rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py | sell_today_quantity | HackReborn/rqalpha | python | @property
def sell_today_quantity(self):
'\n \n '
return sum((amount for (price, amount) in self._sell_today_holding_list)) |
@property
def buy_quantity(self):
'\n [int] total buy-side position\n '
return (self.buy_old_quantity + self.buy_today_quantity) | -7,454,244,290,921,173,000 | [int] total buy-side position | rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py | buy_quantity | HackReborn/rqalpha | python | @property
def buy_quantity(self):
'\n \n '
return (self.buy_old_quantity + self.buy_today_quantity) |
@property
def sell_quantity(self):
'\n [int] total sell-side position\n '
return (self.sell_old_quantity + self.sell_today_quantity) | 5,480,620,548,115,941,000 | [int] total sell-side position | rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py | sell_quantity | HackReborn/rqalpha | python | @property
def sell_quantity(self):
'\n \n '
return (self.sell_old_quantity + self.sell_today_quantity) |
@property
def closable_buy_quantity(self):
'\n [float] closable buy-side position\n '
return (self.buy_quantity - self.sell_close_order_quantity) | -3,778,086,160,350,469,000 | [float] closable buy-side position | rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py | closable_buy_quantity | HackReborn/rqalpha | python | @property
def closable_buy_quantity(self):
'\n \n '
return (self.buy_quantity - self.sell_close_order_quantity) |
@property
def closable_sell_quantity(self):
'\n [float] closable sell-side position\n '
return (self.sell_quantity - self.buy_close_order_quantity) | -7,572,984,469,376,504,000 | [float] closable sell-side position | rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py | closable_sell_quantity | HackReborn/rqalpha | python | @property
def closable_sell_quantity(self):
'\n \n '
return (self.sell_quantity - self.buy_close_order_quantity) |
@property
def buy_margin(self):
'\n [float] buy-side position margin\n '
return (self._buy_holding_cost * self.margin_rate) | 6,846,262,282,015,153,000 | [float] buy-side position margin | rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py | buy_margin | HackReborn/rqalpha | python | @property
def buy_margin(self):
'\n \n '
return (self._buy_holding_cost * self.margin_rate) |
@property
def sell_margin(self):
'\n [float] sell-side position margin\n '
return (self._sell_holding_cost * self.margin_rate) | 5,543,905,457,630,488,000 | [float] sell-side position margin | rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py | sell_margin | HackReborn/rqalpha | python | @property
def sell_margin(self):
'\n \n '
return (self._sell_holding_cost * self.margin_rate) |
@property
def margin(self):
'\n [float] margin\n '
return (self.buy_margin + self.sell_margin) | 7,469,897,476,748,673,000 | [float] margin | rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py | margin | HackReborn/rqalpha | python | @property
def margin(self):
'\n \n '
return (self.buy_margin + self.sell_margin) |
@property
def buy_avg_holding_price(self):
'\n [float] average buy-side holding price\n '
return (0 if (self.buy_quantity == 0) else ((self._buy_holding_cost / self.buy_quantity) / self.contract_multiplier)) | 1,621,258,074,660,974,800 | [float] average buy-side holding price | rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py | buy_avg_holding_price | HackReborn/rqalpha | python | @property
def buy_avg_holding_price(self):
'\n \n '
return (0 if (self.buy_quantity == 0) else ((self._buy_holding_cost / self.buy_quantity) / self.contract_multiplier)) |
@property
def sell_avg_holding_price(self):
'\n [float] average sell-side holding price\n '
return (0 if (self.sell_quantity == 0) else ((self._sell_holding_cost / self.sell_quantity) / self.contract_multiplier)) | -483,452,772,804,627,260 | [float] average sell-side holding price | rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py | sell_avg_holding_price | HackReborn/rqalpha | python | @property
def sell_avg_holding_price(self):
'\n \n '
return (0 if (self.sell_quantity == 0) else ((self._sell_holding_cost / self.sell_quantity) / self.contract_multiplier)) |
def _prometheus_module_metric_decorator(f: FunctionType):
'\n A Prometheus decorator adding timing metrics to a function.\n This decorator will work on both asynchronous and synchronous functions.\n Note, however, that this function will turn synchronous functions into\n asynchronous ones when used as a decorator.\n :param f: The function for which to capture metrics\n '
module_ = f.__module__.split('.')[(- 1)]
call_key = '{}_{}'.format(module_, f.__name__)
@functools.wraps(f)
async def wrapper(*args, **kwargs):
with H.labels(call=call_key).time():
if asyncio.iscoroutinefunction(f):
return (await f(*args, **kwargs))
else:
return f(*args, **kwargs)
return wrapper | 1,521,078,727,260,751,000 | A Prometheus decorator adding timing metrics to a function.
This decorator will work on both asynchronous and synchronous functions.
Note, however, that this function will turn synchronous functions into
asynchronous ones when used as a decorator.
:param f: The function for which to capture metrics | management_layer/metrics.py | _prometheus_module_metric_decorator | girleffect/core-management-layer | python | def _prometheus_module_metric_decorator(f: FunctionType):
'\n A Prometheus decorator adding timing metrics to a function.\n This decorator will work on both asynchronous and synchronous functions.\n Note, however, that this function will turn synchronous functions into\n asynchronous ones when used as a decorator.\n :param f: The function for which to capture metrics\n '
module_ = f.__module__.split('.')[(- 1)]
call_key = '{}_{}'.format(module_, f.__name__)
@functools.wraps(f)
async def wrapper(*args, **kwargs):
with H.labels(call=call_key).time():
if asyncio.iscoroutinefunction(f):
return (await f(*args, **kwargs))
else:
return f(*args, **kwargs)
return wrapper |
def _prometheus_class_metric_decorator(f: FunctionType):
'\n A Prometheus decorator adding timing metrics to a function in a class.\n This decorator will work on both asynchronous and synchronous functions.\n Note, however, that this function will turn synchronous functions into\n asynchronous ones when used as a decorator.\n :param f: The function for which to capture metrics\n '
@functools.wraps(f)
async def wrapper(*args, **kwargs):
with H.labels(call=f.__name__).time():
if asyncio.iscoroutinefunction(f):
return (await f(*args, **kwargs))
else:
return f(*args, **kwargs)
return wrapper | 4,118,149,078,184,187,400 | A Prometheus decorator adding timing metrics to a function in a class.
This decorator will work on both asynchronous and synchronous functions.
Note, however, that this function will turn synchronous functions into
asynchronous ones when used as a decorator.
:param f: The function for which to capture metrics | management_layer/metrics.py | _prometheus_class_metric_decorator | girleffect/core-management-layer | python | def _prometheus_class_metric_decorator(f: FunctionType):
'\n A Prometheus decorator adding timing metrics to a function in a class.\n This decorator will work on both asynchronous and synchronous functions.\n Note, however, that this function will turn synchronous functions into\n asynchronous ones when used as a decorator.\n :param f: The function for which to capture metrics\n '
@functools.wraps(f)
async def wrapper(*args, **kwargs):
with H.labels(call=f.__name__).time():
if asyncio.iscoroutinefunction(f):
return (await f(*args, **kwargs))
else:
return f(*args, **kwargs)
return wrapper |
def add_prometheus_metrics_for_module(module_: ModuleType):
"\n Convenience function applying the Prometheus metrics decorator to the\n specified module's functions.\n :param module_: The module to which the instrumentation will be applied\n "
decorate_all_in_module(module_, _prometheus_module_metric_decorator, []) | 8,135,198,164,741,140,000 | Convenience function applying the Prometheus metrics decorator to the
specified module's functions.
:param module_: The module to which the instrumentation will be applied | management_layer/metrics.py | add_prometheus_metrics_for_module | girleffect/core-management-layer | python | def add_prometheus_metrics_for_module(module_: ModuleType):
"\n Convenience function applying the Prometheus metrics decorator to the\n specified module's functions.\n :param module_: The module to which the instrumentation will be applied\n "
decorate_all_in_module(module_, _prometheus_module_metric_decorator, []) |
def add_prometheus_metrics_for_class(klass: Type):
'\n Convenience function applying the Prometheus metrics decorator to the\n specified class functions.\n :param klass: The class to which the instrumentation will be applied\n '
decorate_all_in_class(klass, _prometheus_class_metric_decorator, []) | 797,859,531,807,395,000 | Convenience function applying the Prometheus metrics decorator to the
specified class functions.
:param klass: The class to which the instrumentation will be applied | management_layer/metrics.py | add_prometheus_metrics_for_class | girleffect/core-management-layer | python | def add_prometheus_metrics_for_class(klass: Type):
'\n Convenience function applying the Prometheus metrics decorator to the\n specified class functions.\n :param klass: The class to which the instrumentation will be applied\n '
decorate_all_in_class(klass, _prometheus_class_metric_decorator, []) |
def decorate_all_in_module(module_: ModuleType, decorator: FunctionType, whitelist: list):
'\n Decorate all functions in a module with the specified decorator\n :param module_: The module to interrogate\n :param decorator: The decorator to apply\n :param whitelist: Functions not to be decorated.\n '
for name in dir(module_):
if (name not in whitelist):
obj = getattr(module_, name)
if (isinstance(obj, FunctionType) or asyncio.iscoroutinefunction(obj)):
if (obj.__module__ == module_.__name__):
logger.debug(f'Adding metrics to {module_}:{name}')
setattr(module_, name, decorator(obj))
else:
logger.debug(f'No metrics on {module_}:{name} because it belongs to another module')
else:
logger.debug(f'No metrics on {module_}:{name} because it is not a coroutine or function') | 8,473,228,878,225,510,000 | Decorate all functions in a module with the specified decorator
:param module_: The module to interrogate
:param decorator: The decorator to apply
:param whitelist: Functions not to be decorated. | management_layer/metrics.py | decorate_all_in_module | girleffect/core-management-layer | python | def decorate_all_in_module(module_: ModuleType, decorator: FunctionType, whitelist: list):
'\n Decorate all functions in a module with the specified decorator\n :param module_: The module to interrogate\n :param decorator: The decorator to apply\n :param whitelist: Functions not to be decorated.\n '
for name in dir(module_):
if (name not in whitelist):
obj = getattr(module_, name)
if (isinstance(obj, FunctionType) or asyncio.iscoroutinefunction(obj)):
if (obj.__module__ == module_.__name__):
logger.debug(f'Adding metrics to {module_}:{name}')
setattr(module_, name, decorator(obj))
else:
logger.debug(f'No metrics on {module_}:{name} because it belongs to another module')
else:
logger.debug(f'No metrics on {module_}:{name} because it is not a coroutine or function') |
def decorate_all_in_class(klass: Type, decorator: FunctionType, whitelist: list):
'\n Decorate all functions in a class with the specified decorator\n :param klass: The class to interrogate\n :param decorator: The decorator to apply\n :param whitelist: Functions not to be decorated.\n '
for name in dir(klass):
if (name not in whitelist):
obj = getattr(klass, name)
if (isinstance(obj, FunctionType) or asyncio.iscoroutinefunction(obj)):
logger.debug(f'Adding metrics to {klass}:{name}')
setattr(klass, name, decorator(obj))
else:
logger.debug(f'No metrics on {klass}:{name} because it is not a coroutine or function') | -8,243,281,856,500,145,000 | Decorate all functions in a class with the specified decorator
:param klass: The class to interrogate
:param decorator: The decorator to apply
:param whitelist: Functions not to be decorated. | management_layer/metrics.py | decorate_all_in_class | girleffect/core-management-layer | python | def decorate_all_in_class(klass: Type, decorator: FunctionType, whitelist: list):
'\n Decorate all functions in a class with the specified decorator\n :param klass: The class to interrogate\n :param decorator: The decorator to apply\n :param whitelist: Functions not to be decorated.\n '
for name in dir(klass):
if (name not in whitelist):
obj = getattr(klass, name)
if (isinstance(obj, FunctionType) or asyncio.iscoroutinefunction(obj)):
logger.debug(f'Adding metrics to {klass}:{name}')
setattr(klass, name, decorator(obj))
else:
logger.debug(f'No metrics on {klass}:{name} because it is not a coroutine or function') |
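A hedged usage sketch; H is assumed elsewhere in this module to be a prometheus_client Histogram labelled by call, roughly:
from prometheus_client import Histogram
H = Histogram('call_duration_seconds', 'Duration of instrumented calls', ['call'])  # assumed definition

import my_service.handlers                         # hypothetical module
add_prometheus_metrics_for_module(my_service.handlers)
# each function in the module is now timed under its '<module>_<name>' call label;
# note that synchronous functions become coroutines and must be awaited afterwards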
def is_spoiler(self) -> bool:
':class:`bool`: Whether this attachment contains a spoiler.'
return self.filename.startswith('SPOILER_') | -401,608,973,917,933,950 | :class:`bool`: Whether this attachment contains a spoiler. | discord/message.py | is_spoiler | NQN-Discord/discord.py | python | def is_spoiler(self) -> bool:
return self.filename.startswith('SPOILER_') |
async def save(self, fp: Union[(io.BufferedIOBase, PathLike[Any])], *, seek_begin: bool=True, use_cached: bool=False) -> int:
'|coro|\n\n Saves this attachment into a file-like object.\n\n Parameters\n -----------\n fp: Union[:class:`io.BufferedIOBase`, :class:`os.PathLike`]\n The file-like object to save this attachment to or the filename\n to use. If a filename is passed then a file is created with that\n filename and used instead.\n seek_begin: :class:`bool`\n Whether to seek to the beginning of the file after saving is\n successfully done.\n use_cached: :class:`bool`\n Whether to use :attr:`proxy_url` rather than :attr:`url` when downloading\n the attachment. This will allow attachments to be saved after deletion\n more often, compared to the regular URL which is generally deleted right\n after the message is deleted. Note that this can still fail to download\n deleted attachments if too much time has passed and it does not work\n on some types of attachments.\n\n Raises\n --------\n HTTPException\n Saving the attachment failed.\n NotFound\n The attachment was deleted.\n\n Returns\n --------\n :class:`int`\n The number of bytes written.\n '
data = (await self.read(use_cached=use_cached))
if isinstance(fp, io.BufferedIOBase):
written = fp.write(data)
if seek_begin:
fp.seek(0)
return written
else:
with open(fp, 'wb') as f:
return f.write(data) | 1,103,073,210,186,261,900 | |coro|
Saves this attachment into a file-like object.
Parameters
-----------
fp: Union[:class:`io.BufferedIOBase`, :class:`os.PathLike`]
The file-like object to save this attachment to or the filename
to use. If a filename is passed then a file is created with that
filename and used instead.
seek_begin: :class:`bool`
Whether to seek to the beginning of the file after saving is
successfully done.
use_cached: :class:`bool`
Whether to use :attr:`proxy_url` rather than :attr:`url` when downloading
the attachment. This will allow attachments to be saved after deletion
more often, compared to the regular URL which is generally deleted right
after the message is deleted. Note that this can still fail to download
deleted attachments if too much time has passed and it does not work
on some types of attachments.
Raises
--------
HTTPException
Saving the attachment failed.
NotFound
The attachment was deleted.
Returns
--------
:class:`int`
The number of bytes written. | discord/message.py | save | NQN-Discord/discord.py | python | async def save(self, fp: Union[(io.BufferedIOBase, PathLike[Any])], *, seek_begin: bool=True, use_cached: bool=False) -> int:
'|coro|\n\n Saves this attachment into a file-like object.\n\n Parameters\n -----------\n fp: Union[:class:`io.BufferedIOBase`, :class:`os.PathLike`]\n The file-like object to save this attachment to or the filename\n to use. If a filename is passed then a file is created with that\n filename and used instead.\n seek_begin: :class:`bool`\n Whether to seek to the beginning of the file after saving is\n successfully done.\n use_cached: :class:`bool`\n Whether to use :attr:`proxy_url` rather than :attr:`url` when downloading\n the attachment. This will allow attachments to be saved after deletion\n more often, compared to the regular URL which is generally deleted right\n after the message is deleted. Note that this can still fail to download\n deleted attachments if too much time has passed and it does not work\n on some types of attachments.\n\n Raises\n --------\n HTTPException\n Saving the attachment failed.\n NotFound\n The attachment was deleted.\n\n Returns\n --------\n :class:`int`\n The number of bytes written.\n '
data = (await self.read(use_cached=use_cached))
if isinstance(fp, io.BufferedIOBase):
written = fp.write(data)
if seek_begin:
fp.seek(0)
return written
else:
with open(fp, 'wb') as f:
return f.write(data) |
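A hedged usage sketch for Attachment.save inside an assumed message handler; the surrounding bot wiring is not shown in the source, and `save_attachments` is an invented name:

import io

async def save_attachments(message):  # message: an assumed discord.Message
    for attachment in message.attachments:
        # Write to disk under the original filename.
        await attachment.save(attachment.filename)
        # Or capture into an in-memory buffer, rewound for immediate reuse.
        buf = io.BytesIO()
        written = await attachment.save(buf, seek_begin=True)
        print(f'{attachment.filename}: {written} bytes buffered')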
async def read(self, *, use_cached: bool=False) -> bytes:
'|coro|\n\n Retrieves the content of this attachment as a :class:`bytes` object.\n\n .. versionadded:: 1.1\n\n Parameters\n -----------\n use_cached: :class:`bool`\n Whether to use :attr:`proxy_url` rather than :attr:`url` when downloading\n the attachment. This will allow attachments to be saved after deletion\n more often, compared to the regular URL which is generally deleted right\n after the message is deleted. Note that this can still fail to download\n deleted attachments if too much time has passed and it does not work\n on some types of attachments.\n\n Raises\n ------\n HTTPException\n Downloading the attachment failed.\n Forbidden\n You do not have permissions to access this attachment\n NotFound\n The attachment was deleted.\n\n Returns\n -------\n :class:`bytes`\n The contents of the attachment.\n '
url = (self.proxy_url if use_cached else self.url)
data = (await self._http.get_from_cdn(url))
return data | 1,480,748,377,377,810,200 | |coro|
Retrieves the content of this attachment as a :class:`bytes` object.
.. versionadded:: 1.1
Parameters
-----------
use_cached: :class:`bool`
Whether to use :attr:`proxy_url` rather than :attr:`url` when downloading
the attachment. This will allow attachments to be saved after deletion
more often, compared to the regular URL which is generally deleted right
after the message is deleted. Note that this can still fail to download
deleted attachments if too much time has passed and it does not work
on some types of attachments.
Raises
------
HTTPException
Downloading the attachment failed.
Forbidden
You do not have permissions to access this attachment
NotFound
The attachment was deleted.
Returns
-------
:class:`bytes`
The contents of the attachment. | discord/message.py | read | NQN-Discord/discord.py | python | async def read(self, *, use_cached: bool=False) -> bytes:
'|coro|\n\n Retrieves the content of this attachment as a :class:`bytes` object.\n\n .. versionadded:: 1.1\n\n Parameters\n -----------\n use_cached: :class:`bool`\n Whether to use :attr:`proxy_url` rather than :attr:`url` when downloading\n the attachment. This will allow attachments to be saved after deletion\n more often, compared to the regular URL which is generally deleted right\n after the message is deleted. Note that this can still fail to download\n deleted attachments if too much time has passed and it does not work\n on some types of attachments.\n\n Raises\n ------\n HTTPException\n Downloading the attachment failed.\n Forbidden\n You do not have permissions to access this attachment\n NotFound\n The attachment was deleted.\n\n Returns\n -------\n :class:`bytes`\n The contents of the attachment.\n '
url = (self.proxy_url if use_cached else self.url)
data = (await self._http.get_from_cdn(url))
return data |
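A sketch of read() with the cached-URL fallback the docstring describes; discord.NotFound is the library's real exception type, while the handler itself is illustrative:

import discord

async def first_attachment_bytes(message):
    attachment = message.attachments[0]
    try:
        data = await attachment.read()
    except discord.NotFound:
        # The original URL is gone; try the CDN proxy copy instead.
        data = await attachment.read(use_cached=True)
    return data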
async def to_file(self, *, use_cached: bool=False, spoiler: bool=False) -> File:
'|coro|\n\n Converts the attachment into a :class:`File` suitable for sending via\n :meth:`abc.Messageable.send`.\n\n .. versionadded:: 1.3\n\n Parameters\n -----------\n use_cached: :class:`bool`\n Whether to use :attr:`proxy_url` rather than :attr:`url` when downloading\n the attachment. This will allow attachments to be saved after deletion\n more often, compared to the regular URL which is generally deleted right\n after the message is deleted. Note that this can still fail to download\n deleted attachments if too much time has passed and it does not work\n on some types of attachments.\n\n .. versionadded:: 1.4\n spoiler: :class:`bool`\n Whether the file is a spoiler.\n\n .. versionadded:: 1.4\n\n Raises\n ------\n HTTPException\n Downloading the attachment failed.\n Forbidden\n You do not have permissions to access this attachment\n NotFound\n The attachment was deleted.\n\n Returns\n -------\n :class:`File`\n The attachment as a file suitable for sending.\n '
data = (await self.read(use_cached=use_cached))
return File(io.BytesIO(data), filename=self.filename, description=self.description, spoiler=spoiler) | -512,193,906,293,261,300 | |coro|
Converts the attachment into a :class:`File` suitable for sending via
:meth:`abc.Messageable.send`.
.. versionadded:: 1.3
Parameters
-----------
use_cached: :class:`bool`
Whether to use :attr:`proxy_url` rather than :attr:`url` when downloading
the attachment. This will allow attachments to be saved after deletion
more often, compared to the regular URL which is generally deleted right
after the message is deleted. Note that this can still fail to download
deleted attachments if too much time has passed and it does not work
on some types of attachments.
.. versionadded:: 1.4
spoiler: :class:`bool`
Whether the file is a spoiler.
.. versionadded:: 1.4
Raises
------
HTTPException
Downloading the attachment failed.
Forbidden
You do not have permissions to access this attachment
NotFound
The attachment was deleted.
Returns
-------
:class:`File`
The attachment as a file suitable for sending. | discord/message.py | to_file | NQN-Discord/discord.py | python | async def to_file(self, *, use_cached: bool=False, spoiler: bool=False) -> File:
'|coro|\n\n Converts the attachment into a :class:`File` suitable for sending via\n :meth:`abc.Messageable.send`.\n\n .. versionadded:: 1.3\n\n Parameters\n -----------\n use_cached: :class:`bool`\n Whether to use :attr:`proxy_url` rather than :attr:`url` when downloading\n the attachment. This will allow attachments to be saved after deletion\n more often, compared to the regular URL which is generally deleted right\n after the message is deleted. Note that this can still fail to download\n deleted attachments if too much time has passed and it does not work\n on some types of attachments.\n\n .. versionadded:: 1.4\n spoiler: :class:`bool`\n Whether the file is a spoiler.\n\n .. versionadded:: 1.4\n\n Raises\n ------\n HTTPException\n Downloading the attachment failed.\n Forbidden\n You do not have permissions to access this attachment\n NotFound\n The attachment was deleted.\n\n Returns\n -------\n :class:`File`\n The attachment as a file suitable for sending.\n '
data = (await self.read(use_cached=use_cached))
return File(io.BytesIO(data), filename=self.filename, description=self.description, spoiler=spoiler) |
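A sketch of to_file() for re-uploading an attachment elsewhere; the message and channel objects are assumed to come from the caller:

async def repost_spoilered(message, destination_channel):
    # Download the first attachment and re-send it, spoiler-tagged.
    file = await message.attachments[0].to_file(spoiler=True)
    await destination_channel.send('reposted:', file=file)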
@property
def id(self) -> int:
':class:`int`: The message ID of the deleted referenced message.'
return self._parent.message_id | 7,496,481,780,977,302,000 | :class:`int`: The message ID of the deleted referenced message. | discord/message.py | id | NQN-Discord/discord.py | python | @property
def id(self) -> int:
return self._parent.message_id |
@property
def channel_id(self) -> int:
':class:`int`: The channel ID of the deleted referenced message.'
return self._parent.channel_id | 8,617,493,030,907,954,000 | :class:`int`: The channel ID of the deleted referenced message. | discord/message.py | channel_id | NQN-Discord/discord.py | python | @property
def channel_id(self) -> int:
return self._parent.channel_id |
@property
def guild_id(self) -> Optional[int]:
'Optional[:class:`int`]: The guild ID of the deleted referenced message.'
return self._parent.guild_id | -2,298,746,837,178,312,200 | Optional[:class:`int`]: The guild ID of the deleted referenced message. | discord/message.py | guild_id | NQN-Discord/discord.py | python | @property
def guild_id(self) -> Optional[int]:
return self._parent.guild_id |
@classmethod
def from_message(cls, message: PartialMessage, *, fail_if_not_exists: bool=True) -> Self:
'Creates a :class:`MessageReference` from an existing :class:`~discord.Message`.\n\n .. versionadded:: 1.6\n\n Parameters\n ----------\n message: :class:`~discord.Message`\n The message to be converted into a reference.\n fail_if_not_exists: :class:`bool`\n Whether replying to the referenced message should raise :class:`HTTPException`\n if the message no longer exists or Discord could not fetch the message.\n\n .. versionadded:: 1.7\n\n Returns\n -------\n :class:`MessageReference`\n A reference to the message.\n '
self = cls(message_id=message.id, channel_id=message.channel.id, guild_id=getattr(message.guild, 'id', None), fail_if_not_exists=fail_if_not_exists)
self._state = message._state
return self | 3,306,092,899,254,703,000 | Creates a :class:`MessageReference` from an existing :class:`~discord.Message`.
.. versionadded:: 1.6
Parameters
----------
message: :class:`~discord.Message`
The message to be converted into a reference.
fail_if_not_exists: :class:`bool`
Whether replying to the referenced message should raise :class:`HTTPException`
if the message no longer exists or Discord could not fetch the message.
.. versionadded:: 1.7
Returns
-------
:class:`MessageReference`
A reference to the message. | discord/message.py | from_message | NQN-Discord/discord.py | python | @classmethod
def from_message(cls, message: PartialMessage, *, fail_if_not_exists: bool=True) -> Self:
'Creates a :class:`MessageReference` from an existing :class:`~discord.Message`.\n\n .. versionadded:: 1.6\n\n Parameters\n ----------\n message: :class:`~discord.Message`\n The message to be converted into a reference.\n fail_if_not_exists: :class:`bool`\n Whether replying to the referenced message should raise :class:`HTTPException`\n if the message no longer exists or Discord could not fetch the message.\n\n .. versionadded:: 1.7\n\n Returns\n -------\n :class:`MessageReference`\n A reference to the message.\n '
self = cls(message_id=message.id, channel_id=message.channel.id, guild_id=getattr(message.guild, 'id', None), fail_if_not_exists=fail_if_not_exists)
self._state = message._state
return self |
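A sketch of building a reference explicitly rather than replying directly; `message` and `channel` are assumed inputs, and the ``reference`` keyword on send() is the standard discord.py way to attach one:

import discord

async def reply_elsewhere(message, channel):
    # Reference an existing message without failing if it is later deleted.
    ref = discord.MessageReference.from_message(message, fail_if_not_exists=False)
    await channel.send('see the referenced message', reference=ref)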
@property
def cached_message(self) -> Optional[Message]:
'Optional[:class:`~discord.Message`]: The cached message, if found in the internal message cache.'
return (self._state and self._state._get_message(self.message_id)) | -3,361,113,795,059,638,300 | Optional[:class:`~discord.Message`]: The cached message, if found in the internal message cache. | discord/message.py | cached_message | NQN-Discord/discord.py | python | @property
def cached_message(self) -> Optional[Message]:
return (self._state and self._state._get_message(self.message_id)) |
@property
def jump_url(self) -> str:
':class:`str`: Returns a URL that allows the client to jump to the referenced message.\n\n .. versionadded:: 1.7\n '
guild_id = (self.guild_id if (self.guild_id is not None) else '@me')
return f'https://discord.com/channels/{guild_id}/{self.channel_id}/{self.message_id}' | -4,083,239,424,268,389,400 | :class:`str`: Returns a URL that allows the client to jump to the referenced message.
.. versionadded:: 1.7 | discord/message.py | jump_url | NQN-Discord/discord.py | python | @property
def jump_url(self) -> str:
':class:`str`: Returns a URL that allows the client to jump to the referenced message.\n\n .. versionadded:: 1.7\n '
guild_id = (self.guild_id if (self.guild_id is not None) else '@me')
return f'https://discord.com/channels/{guild_id}/{self.channel_id}/{self.message_id}' |
@property
def created_at(self) -> datetime.datetime:
":class:`datetime.datetime`: The interaction's creation time in UTC."
return utils.snowflake_time(self.id) | -866,747,575,477,857,700 | :class:`datetime.datetime`: The interaction's creation time in UTC. | discord/message.py | created_at | NQN-Discord/discord.py | python | @property
def created_at(self) -> datetime.datetime:
return utils.snowflake_time(self.id) |
@property
def created_at(self) -> datetime.datetime:
":class:`datetime.datetime`: The partial message's creation time in UTC."
return utils.snowflake_time(self.id) | 7,850,791,838,460,489,000 | :class:`datetime.datetime`: The partial message's creation time in UTC. | discord/message.py | created_at | NQN-Discord/discord.py | python | @property
def created_at(self) -> datetime.datetime:
return utils.snowflake_time(self.id) |
@property
def jump_url(self) -> str:
':class:`str`: Returns a URL that allows the client to jump to this message.'
guild_id = getattr(self.guild, 'id', '@me')
return f'https://discord.com/channels/{guild_id}/{self.channel.id}/{self.id}' | -8,351,056,302,484,025,000 | :class:`str`: Returns a URL that allows the client to jump to this message. | discord/message.py | jump_url | NQN-Discord/discord.py | python | @property
def jump_url(self) -> str:
guild_id = getattr(self.guild, 'id', '@me')
return f'https://discord.com/channels/{guild_id}/{self.channel.id}/{self.id}' |
async def fetch(self) -> Message:
'|coro|\n\n Fetches the partial message to a full :class:`Message`.\n\n Raises\n --------\n NotFound\n The message was not found.\n Forbidden\n You do not have the permissions required to get a message.\n HTTPException\n Retrieving the message failed.\n\n Returns\n --------\n :class:`Message`\n The full message.\n '
data = (await self._state.http.get_message(self.channel.id, self.id))
return self._state.create_message(channel=self.channel, data=data) | 8,393,842,401,168,559,000 | |coro|
Fetches the partial message to a full :class:`Message`.
Raises
--------
NotFound
The message was not found.
Forbidden
You do not have the permissions required to get a message.
HTTPException
Retrieving the message failed.
Returns
--------
:class:`Message`
The full message. | discord/message.py | fetch | NQN-Discord/discord.py | python | async def fetch(self) -> Message:
'|coro|\n\n Fetches the partial message to a full :class:`Message`.\n\n Raises\n --------\n NotFound\n The message was not found.\n Forbidden\n You do not have the permissions required to get a message.\n HTTPException\n Retrieving the message failed.\n\n Returns\n --------\n :class:`Message`\n The full message.\n '
data = (await self._state.http.get_message(self.channel.id, self.id))
return self._state.create_message(channel=self.channel, data=data) |
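A sketch of upgrading a PartialMessage to a full Message; the channel comes from the caller and the ID is a placeholder, not a real snowflake:

async def resolve(channel):
    partial = channel.get_partial_message(123456789012345678)  # placeholder ID
    message = await partial.fetch()  # one HTTP round trip
    print(message.content)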
async def delete(self, *, delay: Optional[float]=None) -> None:
"|coro|\n\n Deletes the message.\n\n Your own messages could be deleted without any proper permissions. However to\n delete other people's messages, you need the :attr:`~Permissions.manage_messages`\n permission.\n\n .. versionchanged:: 1.1\n Added the new ``delay`` keyword-only parameter.\n\n Parameters\n -----------\n delay: Optional[:class:`float`]\n If provided, the number of seconds to wait in the background\n before deleting the message. If the deletion fails then it is silently ignored.\n\n Raises\n ------\n Forbidden\n You do not have proper permissions to delete the message.\n NotFound\n The message was deleted already\n HTTPException\n Deleting the message failed.\n "
if (delay is not None):
async def delete(delay: float):
(await asyncio.sleep(delay))
try:
(await self._state.http.delete_message(self.channel.id, self.id))
except HTTPException:
pass
asyncio.create_task(delete(delay))
else:
(await self._state.http.delete_message(self.channel.id, self.id)) | 7,263,314,868,395,550,000 | |coro|
Deletes the message.
Your own messages could be deleted without any proper permissions. However to
delete other people's messages, you need the :attr:`~Permissions.manage_messages`
permission.
.. versionchanged:: 1.1
Added the new ``delay`` keyword-only parameter.
Parameters
-----------
delay: Optional[:class:`float`]
If provided, the number of seconds to wait in the background
before deleting the message. If the deletion fails then it is silently ignored.
Raises
------
Forbidden
You do not have proper permissions to delete the message.
NotFound
The message was deleted already
HTTPException
Deleting the message failed. | discord/message.py | delete | NQN-Discord/discord.py | python | async def delete(self, *, delay: Optional[float]=None) -> None:
"|coro|\n\n Deletes the message.\n\n Your own messages could be deleted without any proper permissions. However to\n delete other people's messages, you need the :attr:`~Permissions.manage_messages`\n permission.\n\n .. versionchanged:: 1.1\n Added the new ``delay`` keyword-only parameter.\n\n Parameters\n -----------\n delay: Optional[:class:`float`]\n If provided, the number of seconds to wait in the background\n before deleting the message. If the deletion fails then it is silently ignored.\n\n Raises\n ------\n Forbidden\n You do not have proper permissions to delete the message.\n NotFound\n The message was deleted already\n HTTPException\n Deleting the message failed.\n "
if (delay is not None):
async def delete(delay: float):
(await asyncio.sleep(delay))
try:
(await self._state.http.delete_message(self.channel.id, self.id))
except HTTPException:
pass
asyncio.create_task(delete(delay))
else:
(await self._state.http.delete_message(self.channel.id, self.id)) |
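A sketch of delayed deletion; as the body shows, the delay path schedules a background task and silently swallows HTTP failures, so the call returns immediately:

async def self_destruct(channel):
    note = await channel.send('this disappears in 10 seconds')
    await note.delete(delay=10)  # returns at once; deletion runs in the background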
async def edit(self, content: Optional[str]=MISSING, embed: Optional[Embed]=MISSING, embeds: Sequence[Embed]=MISSING, attachments: Sequence[Union[(Attachment, File)]]=MISSING, delete_after: Optional[float]=None, allowed_mentions: Optional[AllowedMentions]=MISSING, view: Optional[View]=MISSING) -> Message:
"|coro|\n\n Edits the message.\n\n The content must be able to be transformed into a string via ``str(content)``.\n\n .. versionchanged:: 2.0\n Edits are no longer in-place, the newly edited message is returned instead.\n\n .. versionchanged:: 2.0\n This function will now raise :exc:`TypeError` instead of\n ``InvalidArgument``.\n\n Parameters\n -----------\n content: Optional[:class:`str`]\n The new content to replace the message with.\n Could be ``None`` to remove the content.\n embed: Optional[:class:`Embed`]\n The new embed to replace the original with.\n Could be ``None`` to remove the embed.\n embeds: List[:class:`Embed`]\n The new embeds to replace the original with. Must be a maximum of 10.\n To remove all embeds ``[]`` should be passed.\n\n .. versionadded:: 2.0\n attachments: List[Union[:class:`Attachment`, :class:`File`]]\n A list of attachments to keep in the message as well as new files to upload. If ``[]`` is passed\n then all attachments are removed.\n\n .. note::\n\n New files will always appear after current attachments.\n\n .. versionadded:: 2.0\n delete_after: Optional[:class:`float`]\n If provided, the number of seconds to wait in the background\n before deleting the message we just edited. If the deletion fails,\n then it is silently ignored.\n allowed_mentions: Optional[:class:`~discord.AllowedMentions`]\n Controls the mentions being processed in this message. If this is\n passed, then the object is merged with :attr:`~discord.Client.allowed_mentions`.\n The merging behaviour only overrides attributes that have been explicitly passed\n to the object, otherwise it uses the attributes set in :attr:`~discord.Client.allowed_mentions`.\n If no object is passed at all then the defaults given by :attr:`~discord.Client.allowed_mentions`\n are used instead.\n\n .. versionadded:: 1.4\n view: Optional[:class:`~discord.ui.View`]\n The updated view to update this message with. If ``None`` is passed then\n the view is removed.\n\n Raises\n -------\n HTTPException\n Editing the message failed.\n Forbidden\n Tried to suppress a message without permissions or\n edited a message's content or embed that isn't yours.\n TypeError\n You specified both ``embed`` and ``embeds``\n\n Returns\n --------\n :class:`Message`\n The newly edited message.\n "
if (content is not MISSING):
previous_allowed_mentions = self._state.allowed_mentions
else:
previous_allowed_mentions = None
if (view is not MISSING):
self._state.prevent_view_updates_for(self.id)
params = handle_message_parameters(content=content, embed=embed, embeds=embeds, attachments=attachments, view=view, allowed_mentions=allowed_mentions, previous_allowed_mentions=previous_allowed_mentions)
data = (await self._state.http.edit_message(self.channel.id, self.id, params=params))
message = Message(state=self._state, channel=self.channel, data=data)
if (view and (not view.is_finished())):
self._state.store_view(view, self.id)
if (delete_after is not None):
(await self.delete(delay=delete_after))
return message | 4,713,843,514,001,606,000 | |coro|
Edits the message.
The content must be able to be transformed into a string via ``str(content)``.
.. versionchanged:: 2.0
Edits are no longer in-place, the newly edited message is returned instead.
.. versionchanged:: 2.0
This function will now raise :exc:`TypeError` instead of
``InvalidArgument``.
Parameters
-----------
content: Optional[:class:`str`]
The new content to replace the message with.
Could be ``None`` to remove the content.
embed: Optional[:class:`Embed`]
The new embed to replace the original with.
Could be ``None`` to remove the embed.
embeds: List[:class:`Embed`]
The new embeds to replace the original with. Must be a maximum of 10.
To remove all embeds ``[]`` should be passed.
.. versionadded:: 2.0
attachments: List[Union[:class:`Attachment`, :class:`File`]]
A list of attachments to keep in the message as well as new files to upload. If ``[]`` is passed
then all attachments are removed.
.. note::
New files will always appear after current attachments.
.. versionadded:: 2.0
delete_after: Optional[:class:`float`]
If provided, the number of seconds to wait in the background
before deleting the message we just edited. If the deletion fails,
then it is silently ignored.
allowed_mentions: Optional[:class:`~discord.AllowedMentions`]
Controls the mentions being processed in this message. If this is
passed, then the object is merged with :attr:`~discord.Client.allowed_mentions`.
The merging behaviour only overrides attributes that have been explicitly passed
to the object, otherwise it uses the attributes set in :attr:`~discord.Client.allowed_mentions`.
If no object is passed at all then the defaults given by :attr:`~discord.Client.allowed_mentions`
are used instead.
.. versionadded:: 1.4
view: Optional[:class:`~discord.ui.View`]
The updated view to update this message with. If ``None`` is passed then
the view is removed.
Raises
-------
HTTPException
Editing the message failed.
Forbidden
Tried to suppress a message without permissions or
edited a message's content or embed that isn't yours.
TypeError
You specified both ``embed`` and ``embeds``
Returns
--------
:class:`Message`
The newly edited message. | discord/message.py | edit | NQN-Discord/discord.py | python | async def edit(self, content: Optional[str]=MISSING, embed: Optional[Embed]=MISSING, embeds: Sequence[Embed]=MISSING, attachments: Sequence[Union[(Attachment, File)]]=MISSING, delete_after: Optional[float]=None, allowed_mentions: Optional[AllowedMentions]=MISSING, view: Optional[View]=MISSING) -> Message:
"|coro|\n\n Edits the message.\n\n The content must be able to be transformed into a string via ``str(content)``.\n\n .. versionchanged:: 2.0\n Edits are no longer in-place, the newly edited message is returned instead.\n\n .. versionchanged:: 2.0\n This function will now raise :exc:`TypeError` instead of\n ``InvalidArgument``.\n\n Parameters\n -----------\n content: Optional[:class:`str`]\n The new content to replace the message with.\n Could be ``None`` to remove the content.\n embed: Optional[:class:`Embed`]\n The new embed to replace the original with.\n Could be ``None`` to remove the embed.\n embeds: List[:class:`Embed`]\n The new embeds to replace the original with. Must be a maximum of 10.\n To remove all embeds ``[]`` should be passed.\n\n .. versionadded:: 2.0\n attachments: List[Union[:class:`Attachment`, :class:`File`]]\n A list of attachments to keep in the message as well as new files to upload. If ``[]`` is passed\n then all attachments are removed.\n\n .. note::\n\n New files will always appear after current attachments.\n\n .. versionadded:: 2.0\n delete_after: Optional[:class:`float`]\n If provided, the number of seconds to wait in the background\n before deleting the message we just edited. If the deletion fails,\n then it is silently ignored.\n allowed_mentions: Optional[:class:`~discord.AllowedMentions`]\n Controls the mentions being processed in this message. If this is\n passed, then the object is merged with :attr:`~discord.Client.allowed_mentions`.\n The merging behaviour only overrides attributes that have been explicitly passed\n to the object, otherwise it uses the attributes set in :attr:`~discord.Client.allowed_mentions`.\n If no object is passed at all then the defaults given by :attr:`~discord.Client.allowed_mentions`\n are used instead.\n\n .. versionadded:: 1.4\n view: Optional[:class:`~discord.ui.View`]\n The updated view to update this message with. If ``None`` is passed then\n the view is removed.\n\n Raises\n -------\n HTTPException\n Editing the message failed.\n Forbidden\n Tried to suppress a message without permissions or\n edited a message's content or embed that isn't yours.\n TypeError\n You specified both ``embed`` and ``embeds``\n\n Returns\n --------\n :class:`Message`\n The newly edited message.\n "
if (content is not MISSING):
previous_allowed_mentions = self._state.allowed_mentions
else:
previous_allowed_mentions = None
if (view is not MISSING):
self._state.prevent_view_updates_for(self.id)
params = handle_message_parameters(content=content, embed=embed, embeds=embeds, attachments=attachments, view=view, allowed_mentions=allowed_mentions, previous_allowed_mentions=previous_allowed_mentions)
data = (await self._state.http.edit_message(self.channel.id, self.id, params=params))
message = Message(state=self._state, channel=self.channel, data=data)
if (view and (not view.is_finished())):
self._state.store_view(view, self.id)
if (delete_after is not None):
(await self.delete(delay=delete_after))
return message |
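A sketch of the 2.0 edit flow, where the edited message is returned rather than mutated in place; `channel` is an assumed input:

async def progress(channel):
    msg = await channel.send('working...')
    # Replace the content, drop any embeds, and auto-delete half a minute later.
    updated = await msg.edit(content='done!', embeds=[], delete_after=30.0)
    assert updated.id == msg.id  # same message on Discord, new local object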
async def publish(self) -> None:
'|coro|\n\n Publishes this message to your announcement channel.\n\n You must have the :attr:`~Permissions.send_messages` permission to do this.\n\n If the message is not your own then the :attr:`~Permissions.manage_messages`\n permission is also needed.\n\n Raises\n -------\n Forbidden\n You do not have the proper permissions to publish this message.\n HTTPException\n Publishing the message failed.\n '
(await self._state.http.publish_message(self.channel.id, self.id)) | -589,188,377,987,699,000 | |coro|
Publishes this message to your announcement channel.
You must have the :attr:`~Permissions.send_messages` permission to do this.
If the message is not your own then the :attr:`~Permissions.manage_messages`
permission is also needed.
Raises
-------
Forbidden
You do not have the proper permissions to publish this message.
HTTPException
Publishing the message failed. | discord/message.py | publish | NQN-Discord/discord.py | python | async def publish(self) -> None:
'|coro|\n\n Publishes this message to your announcement channel.\n\n You must have the :attr:`~Permissions.send_messages` permission to do this.\n\n If the message is not your own then the :attr:`~Permissions.manage_messages`\n permission is also needed.\n\n Raises\n -------\n Forbidden\n You do not have the proper permissions to publish this message.\n HTTPException\n Publishing the message failed.\n '
(await self._state.http.publish_message(self.channel.id, self.id)) |
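A sketch of publishing to followers of an announcement channel; `news_channel` is an assumed TextChannel with news enabled:

async def announce(news_channel):
    post = await news_channel.send('release v2.1 is out')
    await post.publish()  # crossposts to all following channels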