body (string, 26–98.2k chars) | body_hash (int64) | docstring (string, 1–16.8k chars) | path (string, 5–230 chars) | name (string, 1–96 chars) | repository_name (string, 7–89 chars) | lang (string, 1 class) | body_without_docstring (string, 20–98.2k chars) |
---|---|---|---|---|---|---|---|
def get_dataframe(self, squeeze=True):
'\n Cast recarrays for stress periods into single\n dataframe containing all stress periods.\n\n Parameters\n ----------\n squeeze : bool\n Reduce number of columns in dataframe to only include\n stress periods where a variable changes.\n\n Returns\n -------\n df : dataframe\n Dataframe of shape nrow = ncells, ncol = nvar x nper. If\n the squeeze option is chosen, nper is the number of\n stress periods where at least one cells is different,\n otherwise it is equal to the number of keys in MfList.data.\n\n Notes\n -----\n Requires pandas.\n\n '
try:
import pandas as pd
except Exception as e:
msg = 'MfList.get_dataframe() requires pandas'
raise ImportError(msg)
names = ['k', 'i', 'j']
if ('MNW2' in self.package.name):
names += ['wellid']
for per in range(self._model.nper):
if hasattr(self.data[per], 'dtype'):
varnames = list([n for n in self.data[per].dtype.names if (n not in names)])
break
dfs = []
for per in self.data.keys():
recs = self.data[per]
if ((recs is None) or (len(recs) == 0)):
columns = (names + list(['{}{}'.format(c, per) for c in varnames]))
dfi = pd.DataFrame(data=None, columns=columns)
dfi = dfi.set_index(names)
else:
dfi = pd.DataFrame.from_records(recs)
dfg = dfi.groupby(names)
count = dfg[varnames[0]].count().rename('n')
if (count > 1).values.any():
print('Duplicated list entry locations aggregated for kper {}'.format(per))
for kij in count[(count > 1)].index.values:
print(' (k,i,j) {}'.format(kij))
dfi = dfg.sum()
dfi.columns = list(['{}{}'.format(c, per) for c in varnames])
dfs.append(dfi)
df = pd.concat(dfs, axis=1)
if squeeze:
keep = []
for var in varnames:
diffcols = list([n for n in df.columns if (var in n)])
diff = df[diffcols].fillna(0).diff(axis=1)
diff['{}0'.format(var)] = 1
changed = (diff.sum(axis=0) != 0)
keep.append(df.loc[:, changed.index[changed]])
df = pd.concat(keep, axis=1)
df = df.reset_index()
df.insert(len(names), 'node', ((df.i * self._model.ncol) + df.j))
return df | 6,324,942,353,991,596,000 | Cast recarrays for stress periods into single
dataframe containing all stress periods.
Parameters
----------
squeeze : bool
Reduce number of columns in dataframe to only include
stress periods where a variable changes.
Returns
-------
df : dataframe
Dataframe of shape nrow = ncells, ncol = nvar x nper. If
the squeeze option is chosen, nper is the number of
stress periods where at least one cells is different,
otherwise it is equal to the number of keys in MfList.data.
Notes
-----
Requires pandas. | flopy/utils/util_list.py | get_dataframe | aleaf/flopy | python | def get_dataframe(self, squeeze=True):
'\n Cast recarrays for stress periods into single\n dataframe containing all stress periods.\n\n Parameters\n ----------\n squeeze : bool\n Reduce number of columns in dataframe to only include\n stress periods where a variable changes.\n\n Returns\n -------\n df : dataframe\n Dataframe of shape nrow = ncells, ncol = nvar x nper. If\n the squeeze option is chosen, nper is the number of\n stress periods where at least one cells is different,\n otherwise it is equal to the number of keys in MfList.data.\n\n Notes\n -----\n Requires pandas.\n\n '
try:
import pandas as pd
except Exception as e:
msg = 'MfList.get_dataframe() requires pandas'
raise ImportError(msg)
names = ['k', 'i', 'j']
if ('MNW2' in self.package.name):
names += ['wellid']
for per in range(self._model.nper):
if hasattr(self.data[per], 'dtype'):
varnames = list([n for n in self.data[per].dtype.names if (n not in names)])
break
dfs = []
for per in self.data.keys():
recs = self.data[per]
if ((recs is None) or (len(recs) == 0)):
columns = (names + list(['{}{}'.format(c, per) for c in varnames]))
dfi = pd.DataFrame(data=None, columns=columns)
dfi = dfi.set_index(names)
else:
dfi = pd.DataFrame.from_records(recs)
dfg = dfi.groupby(names)
count = dfg[varnames[0]].count().rename('n')
if (count > 1).values.any():
print('Duplicated list entry locations aggregated for kper {}'.format(per))
for kij in count[(count > 1)].index.values:
print(' (k,i,j) {}'.format(kij))
dfi = dfg.sum()
dfi.columns = list(['{}{}'.format(c, per) for c in varnames])
dfs.append(dfi)
df = pd.concat(dfs, axis=1)
if squeeze:
keep = []
for var in varnames:
diffcols = list([n for n in df.columns if (var in n)])
diff = df[diffcols].fillna(0).diff(axis=1)
diff['{}0'.format(var)] = 1
changed = (diff.sum(axis=0) != 0)
keep.append(df.loc[:, changed.index[changed]])
df = pd.concat(keep, axis=1)
df = df.reset_index()
df.insert(len(names), 'node', ((df.i * self._model.ncol) + df.j))
return df |
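A minimal usage sketch for the `get_dataframe` row above, building a throwaway model in memory rather than loading one from disk. The model dimensions, well rates, and package choice are illustrative assumptions, and newer flopy releases may name the per-period columns differently than the implementation shown in the row.

```python
# Hedged sketch: requires flopy and pandas; all model details are invented.
import flopy

m = flopy.modflow.Modflow(modelname="demo")
flopy.modflow.ModflowDis(m, nlay=1, nrow=5, ncol=5, nper=2, perlen=[1.0, 1.0])
wel = flopy.modflow.ModflowWel(
    m,
    stress_period_data={
        0: [[0, 2, 2, -100.0]],   # kper 0: one well at (k=0, i=2, j=2)
        1: [[0, 2, 2, -150.0]],   # kper 1: same cell, different rate
    },
)

# With the implementation shown in the row, the result has k, i, j, node plus
# one column per variable per stress period (e.g. flux0, flux1); squeeze=True
# (the default) keeps only periods where at least one cell changes.
df = wel.stress_period_data.get_dataframe()
print(df)
```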
def get_indices(self):
'\n a helper function for plotting - get all unique indices\n '
names = self.dtype.names
lnames = []
[lnames.append(name.lower()) for name in names]
if (('k' not in lnames) or ('j' not in lnames)):
raise NotImplementedError('MfList.get_indices requires kij')
kpers = list(self.data.keys())
kpers.sort()
indices = []
for (i, kper) in enumerate(kpers):
kper_vtype = self.__vtype[kper]
if ((kper_vtype != int) or (kper_vtype is not None)):
d = self.data[kper]
if (not indices):
indices = list(zip(d['k'], d['i'], d['j']))
else:
new_indices = list(zip(d['k'], d['i'], d['j']))
for ni in new_indices:
if (ni not in indices):
indices.append(ni)
return indices | 3,431,313,341,761,410,600 | a helper function for plotting - get all unique indices | flopy/utils/util_list.py | get_indices | aleaf/flopy | python | def get_indices(self):
'\n \n '
names = self.dtype.names
lnames = []
[lnames.append(name.lower()) for name in names]
if (('k' not in lnames) or ('j' not in lnames)):
raise NotImplementedError('MfList.get_indices requires kij')
kpers = list(self.data.keys())
kpers.sort()
indices = []
for (i, kper) in enumerate(kpers):
kper_vtype = self.__vtype[kper]
if ((kper_vtype != int) or (kper_vtype is not None)):
d = self.data[kper]
if (not indices):
indices = list(zip(d['k'], d['i'], d['j']))
else:
new_indices = list(zip(d['k'], d['i'], d['j']))
for ni in new_indices:
if (ni not in indices):
indices.append(ni)
return indices |
def plot(self, key=None, names=None, kper=0, filename_base=None, file_extension=None, mflay=None, **kwargs):
"\n Plot stress period boundary condition (MfList) data for a specified\n stress period\n\n Parameters\n ----------\n key : str\n MfList dictionary key. (default is None)\n names : list\n List of names for figure titles. (default is None)\n kper : int\n MODFLOW zero-based stress period number to return. (default is zero)\n filename_base : str\n Base file name that will be used to automatically generate file\n names for output image files. Plots will be exported as image\n files if file_name_base is not None. (default is None)\n file_extension : str\n Valid matplotlib.pyplot file extension for savefig(). Only used\n if filename_base is not None. (default is 'png')\n mflay : int\n MODFLOW zero-based layer number to return. If None, then all\n all layers will be included. (default is None)\n **kwargs : dict\n axes : list of matplotlib.pyplot.axis\n List of matplotlib.pyplot.axis that will be used to plot\n data for each layer. If axes=None axes will be generated.\n (default is None)\n pcolor : bool\n Boolean used to determine if matplotlib.pyplot.pcolormesh\n plot will be plotted. (default is True)\n colorbar : bool\n Boolean used to determine if a color bar will be added to\n the matplotlib.pyplot.pcolormesh. Only used if pcolor=True.\n (default is False)\n inactive : bool\n Boolean used to determine if a black overlay in inactive\n cells in a layer will be displayed. (default is True)\n contour : bool\n Boolean used to determine if matplotlib.pyplot.contour\n plot will be plotted. (default is False)\n clabel : bool\n Boolean used to determine if matplotlib.pyplot.clabel\n will be plotted. Only used if contour=True. (default is False)\n grid : bool\n Boolean used to determine if the model grid will be plotted\n on the figure. (default is False)\n masked_values : list\n List of unique values to be excluded from the plot.\n\n Returns\n ----------\n out : list\n Empty list is returned if filename_base is not None. Otherwise\n a list of matplotlib.pyplot.axis is returned.\n\n See Also\n --------\n\n Notes\n -----\n\n Examples\n --------\n >>> import flopy\n >>> ml = flopy.modflow.Modflow.load('test.nam')\n >>> ml.wel.stress_period_data.plot(ml.wel, kper=1)\n\n "
from flopy.plot import PlotUtilities
axes = PlotUtilities._plot_mflist_helper(self, key=key, names=names, kper=kper, filename_base=filename_base, file_extension=file_extension, mflay=mflay, **kwargs)
return axes | 7,490,638,837,416,212,000 | Plot stress period boundary condition (MfList) data for a specified
stress period
Parameters
----------
key : str
MfList dictionary key. (default is None)
names : list
List of names for figure titles. (default is None)
kper : int
MODFLOW zero-based stress period number to return. (default is zero)
filename_base : str
Base file name that will be used to automatically generate file
names for output image files. Plots will be exported as image
files if file_name_base is not None. (default is None)
file_extension : str
Valid matplotlib.pyplot file extension for savefig(). Only used
if filename_base is not None. (default is 'png')
mflay : int
MODFLOW zero-based layer number to return. If None, then all
all layers will be included. (default is None)
**kwargs : dict
axes : list of matplotlib.pyplot.axis
List of matplotlib.pyplot.axis that will be used to plot
data for each layer. If axes=None axes will be generated.
(default is None)
pcolor : bool
Boolean used to determine if matplotlib.pyplot.pcolormesh
plot will be plotted. (default is True)
colorbar : bool
Boolean used to determine if a color bar will be added to
the matplotlib.pyplot.pcolormesh. Only used if pcolor=True.
(default is False)
inactive : bool
Boolean used to determine if a black overlay in inactive
cells in a layer will be displayed. (default is True)
contour : bool
Boolean used to determine if matplotlib.pyplot.contour
plot will be plotted. (default is False)
clabel : bool
Boolean used to determine if matplotlib.pyplot.clabel
will be plotted. Only used if contour=True. (default is False)
grid : bool
Boolean used to determine if the model grid will be plotted
on the figure. (default is False)
masked_values : list
List of unique values to be excluded from the plot.
Returns
----------
out : list
Empty list is returned if filename_base is not None. Otherwise
a list of matplotlib.pyplot.axis is returned.
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow.load('test.nam')
>>> ml.wel.stress_period_data.plot(ml.wel, kper=1) | flopy/utils/util_list.py | plot | aleaf/flopy | python | def plot(self, key=None, names=None, kper=0, filename_base=None, file_extension=None, mflay=None, **kwargs):
"\n Plot stress period boundary condition (MfList) data for a specified\n stress period\n\n Parameters\n ----------\n key : str\n MfList dictionary key. (default is None)\n names : list\n List of names for figure titles. (default is None)\n kper : int\n MODFLOW zero-based stress period number to return. (default is zero)\n filename_base : str\n Base file name that will be used to automatically generate file\n names for output image files. Plots will be exported as image\n files if file_name_base is not None. (default is None)\n file_extension : str\n Valid matplotlib.pyplot file extension for savefig(). Only used\n if filename_base is not None. (default is 'png')\n mflay : int\n MODFLOW zero-based layer number to return. If None, then all\n all layers will be included. (default is None)\n **kwargs : dict\n axes : list of matplotlib.pyplot.axis\n List of matplotlib.pyplot.axis that will be used to plot\n data for each layer. If axes=None axes will be generated.\n (default is None)\n pcolor : bool\n Boolean used to determine if matplotlib.pyplot.pcolormesh\n plot will be plotted. (default is True)\n colorbar : bool\n Boolean used to determine if a color bar will be added to\n the matplotlib.pyplot.pcolormesh. Only used if pcolor=True.\n (default is False)\n inactive : bool\n Boolean used to determine if a black overlay in inactive\n cells in a layer will be displayed. (default is True)\n contour : bool\n Boolean used to determine if matplotlib.pyplot.contour\n plot will be plotted. (default is False)\n clabel : bool\n Boolean used to determine if matplotlib.pyplot.clabel\n will be plotted. Only used if contour=True. (default is False)\n grid : bool\n Boolean used to determine if the model grid will be plotted\n on the figure. (default is False)\n masked_values : list\n List of unique values to be excluded from the plot.\n\n Returns\n ----------\n out : list\n Empty list is returned if filename_base is not None. Otherwise\n a list of matplotlib.pyplot.axis is returned.\n\n See Also\n --------\n\n Notes\n -----\n\n Examples\n --------\n >>> import flopy\n >>> ml = flopy.modflow.Modflow.load('test.nam')\n >>> ml.wel.stress_period_data.plot(ml.wel, kper=1)\n\n "
from flopy.plot import PlotUtilities
axes = PlotUtilities._plot_mflist_helper(self, key=key, names=names, kper=kper, filename_base=filename_base, file_extension=file_extension, mflay=mflay, **kwargs)
return axes |
def to_shapefile(self, filename, kper=None):
"\n Export stress period boundary condition (MfList) data for a specified\n stress period\n\n Parameters\n ----------\n filename : str\n Shapefile name to write\n kper : int\n MODFLOW zero-based stress period number to return. (default is None)\n\n Returns\n ----------\n None\n\n See Also\n --------\n\n Notes\n -----\n\n Examples\n --------\n >>> import flopy\n >>> ml = flopy.modflow.Modflow.load('test.nam')\n >>> ml.wel.to_shapefile('test_hk.shp', kper=1)\n "
import warnings
warnings.warn('Deprecation warning: to_shapefile() is deprecated. use .export()')
self.export(filename, kper=kper) | -2,751,994,616,249,143,000 | Export stress period boundary condition (MfList) data for a specified
stress period
Parameters
----------
filename : str
Shapefile name to write
kper : int
MODFLOW zero-based stress period number to return. (default is None)
Returns
----------
None
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow.load('test.nam')
>>> ml.wel.to_shapefile('test_hk.shp', kper=1) | flopy/utils/util_list.py | to_shapefile | aleaf/flopy | python | def to_shapefile(self, filename, kper=None):
"\n Export stress period boundary condition (MfList) data for a specified\n stress period\n\n Parameters\n ----------\n filename : str\n Shapefile name to write\n kper : int\n MODFLOW zero-based stress period number to return. (default is None)\n\n Returns\n ----------\n None\n\n See Also\n --------\n\n Notes\n -----\n\n Examples\n --------\n >>> import flopy\n >>> ml = flopy.modflow.Modflow.load('test.nam')\n >>> ml.wel.to_shapefile('test_hk.shp', kper=1)\n "
import warnings
warnings.warn('Deprecation warning: to_shapefile() is deprecated. use .export()')
self.export(filename, kper=kper) |
def to_array(self, kper=0, mask=False):
"\n Convert stress period boundary condition (MfList) data for a\n specified stress period to a 3-D numpy array\n\n Parameters\n ----------\n kper : int\n MODFLOW zero-based stress period number to return. (default is zero)\n mask : boolean\n return array with np.NaN instead of zero\n Returns\n ----------\n out : dict of numpy.ndarrays\n Dictionary of 3-D numpy arrays containing the stress period data for\n a selected stress period. The dictionary keys are the MfList dtype\n names for the stress period data ('cond', 'flux', 'bhead', etc.).\n\n See Also\n --------\n\n Notes\n -----\n\n Examples\n --------\n >>> import flopy\n >>> ml = flopy.modflow.Modflow.load('test.nam')\n >>> v = ml.wel.stress_period_data.to_array(kper=1)\n\n "
i0 = 3
unstructured = False
if ('inode' in self.dtype.names):
raise NotImplementedError()
if ('node' in self.dtype.names):
if (('i' not in self.dtype.names) and ('j' not in self.dtype.names)):
i0 = 1
unstructured = True
arrays = {}
for name in self.dtype.names[i0:]:
if (not (self.dtype.fields[name][0] == object)):
if unstructured:
arr = np.zeros(((self._model.nlay * self._model.ncpl),))
else:
arr = np.zeros((self._model.nlay, self._model.nrow, self._model.ncol))
arrays[name] = arr.copy()
if (kper not in self.data.keys()):
kpers = list(self.data.keys())
kpers.sort()
if (kper < kpers[0]):
if mask:
for (name, arr) in arrays.items():
arrays[name][:] = np.NaN
return arrays
else:
kper = self.__find_last_kper(kper)
sarr = self.data[kper]
if np.isscalar(sarr):
if (sarr == 0):
if mask:
for (name, arr) in arrays.items():
arrays[name][:] = np.NaN
return arrays
else:
raise Exception('MfList: something bad happened')
for (name, arr) in arrays.items():
if unstructured:
cnt = np.zeros(((self._model.nlay * self._model.ncpl),), dtype=np.float)
else:
cnt = np.zeros((self._model.nlay, self._model.nrow, self._model.ncol), dtype=np.float)
for rec in sarr:
if unstructured:
arr[rec['node']] += rec[name]
cnt[rec['node']] += 1.0
else:
arr[(rec['k'], rec['i'], rec['j'])] += rec[name]
cnt[(rec['k'], rec['i'], rec['j'])] += 1.0
if (name not in ('cond', 'flux')):
idx = (cnt > 0.0)
arr[idx] /= cnt[idx]
if mask:
arr = np.ma.masked_where((cnt == 0.0), arr)
arr[(cnt == 0.0)] = np.NaN
arrays[name] = arr.copy()
return arrays | 49,518,715,375,102,890 | Convert stress period boundary condition (MfList) data for a
specified stress period to a 3-D numpy array
Parameters
----------
kper : int
MODFLOW zero-based stress period number to return. (default is zero)
mask : boolean
return array with np.NaN instead of zero
Returns
----------
out : dict of numpy.ndarrays
Dictionary of 3-D numpy arrays containing the stress period data for
a selected stress period. The dictionary keys are the MfList dtype
names for the stress period data ('cond', 'flux', 'bhead', etc.).
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow.load('test.nam')
>>> v = ml.wel.stress_period_data.to_array(kper=1) | flopy/utils/util_list.py | to_array | aleaf/flopy | python | def to_array(self, kper=0, mask=False):
"\n Convert stress period boundary condition (MfList) data for a\n specified stress period to a 3-D numpy array\n\n Parameters\n ----------\n kper : int\n MODFLOW zero-based stress period number to return. (default is zero)\n mask : boolean\n return array with np.NaN instead of zero\n Returns\n ----------\n out : dict of numpy.ndarrays\n Dictionary of 3-D numpy arrays containing the stress period data for\n a selected stress period. The dictionary keys are the MfList dtype\n names for the stress period data ('cond', 'flux', 'bhead', etc.).\n\n See Also\n --------\n\n Notes\n -----\n\n Examples\n --------\n >>> import flopy\n >>> ml = flopy.modflow.Modflow.load('test.nam')\n >>> v = ml.wel.stress_period_data.to_array(kper=1)\n\n "
i0 = 3
unstructured = False
if ('inode' in self.dtype.names):
raise NotImplementedError()
if ('node' in self.dtype.names):
if (('i' not in self.dtype.names) and ('j' not in self.dtype.names)):
i0 = 1
unstructured = True
arrays = {}
for name in self.dtype.names[i0:]:
if (not (self.dtype.fields[name][0] == object)):
if unstructured:
arr = np.zeros(((self._model.nlay * self._model.ncpl),))
else:
arr = np.zeros((self._model.nlay, self._model.nrow, self._model.ncol))
arrays[name] = arr.copy()
if (kper not in self.data.keys()):
kpers = list(self.data.keys())
kpers.sort()
if (kper < kpers[0]):
if mask:
for (name, arr) in arrays.items():
arrays[name][:] = np.NaN
return arrays
else:
kper = self.__find_last_kper(kper)
sarr = self.data[kper]
if np.isscalar(sarr):
if (sarr == 0):
if mask:
for (name, arr) in arrays.items():
arrays[name][:] = np.NaN
return arrays
else:
raise Exception('MfList: something bad happened')
for (name, arr) in arrays.items():
if unstructured:
cnt = np.zeros(((self._model.nlay * self._model.ncpl),), dtype=np.float)
else:
cnt = np.zeros((self._model.nlay, self._model.nrow, self._model.ncol), dtype=np.float)
for rec in sarr:
if unstructured:
arr[rec['node']] += rec[name]
cnt[rec['node']] += 1.0
else:
arr[(rec['k'], rec['i'], rec['j'])] += rec[name]
cnt[(rec['k'], rec['i'], rec['j'])] += 1.0
if (name not in ('cond', 'flux')):
idx = (cnt > 0.0)
arr[idx] /= cnt[idx]
if mask:
arr = np.ma.masked_where((cnt == 0.0), arr)
arr[(cnt == 0.0)] = np.NaN
arrays[name] = arr.copy()
return arrays |
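The docstring above already shows a `to_array` call on a loaded model; as a fully self-contained complement, the sketch below (same kind of throwaway model, all values assumed) shows the list data being expanded onto the model grid.

```python
# Hedged sketch: requires flopy and numpy; the tiny model is illustrative.
import numpy as np
import flopy

m = flopy.modflow.Modflow(modelname="demo_arr")
flopy.modflow.ModflowDis(m, nlay=1, nrow=5, ncol=5, nper=1)
wel = flopy.modflow.ModflowWel(m, stress_period_data={0: [[0, 2, 2, -100.0]]})

# One 3-D array per list variable; zeros where no entry exists
# (mask=True would put NaN there instead).
flux = wel.stress_period_data.to_array(kper=0)["flux"]
print(flux.shape)              # (1, 5, 5)
print(np.count_nonzero(flux))  # 1 -> only the single well cell is non-zero
```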
@classmethod
def from_4d(cls, model, pak_name, m4ds):
'construct an MfList instance from a dict of\n (attribute_name,masked 4D ndarray\n Parameters\n ----------\n model : mbase derived type\n pak_name : str package name (e.g GHB)\n m4ds : {attribute name:4d masked numpy.ndarray}\n Returns\n -------\n MfList instance\n '
sp_data = MfList.masked4D_arrays_to_stress_period_data(model.get_package(pak_name).get_default_dtype(), m4ds)
return cls(model.get_package(pak_name), data=sp_data) | 7,955,611,825,442,068,000 | construct an MfList instance from a dict of
(attribute_name,masked 4D ndarray
Parameters
----------
model : mbase derived type
pak_name : str package name (e.g GHB)
m4ds : {attribute name:4d masked numpy.ndarray}
Returns
-------
MfList instance | flopy/utils/util_list.py | from_4d | aleaf/flopy | python | @classmethod
def from_4d(cls, model, pak_name, m4ds):
'construct an MfList instance from a dict of\n (attribute_name,masked 4D ndarray\n Parameters\n ----------\n model : mbase derived type\n pak_name : str package name (e.g GHB)\n m4ds : {attribute name:4d masked numpy.ndarray}\n Returns\n -------\n MfList instance\n '
sp_data = MfList.masked4D_arrays_to_stress_period_data(model.get_package(pak_name).get_default_dtype(), m4ds)
return cls(model.get_package(pak_name), data=sp_data) |
@staticmethod
def masked4D_arrays_to_stress_period_data(dtype, m4ds):
' convert a dictionary of 4-dim masked arrays to\n a stress_period_data style dict of recarray\n Parameters\n ----------\n dtype : numpy dtype\n\n m4ds : dict {name:masked numpy 4-dim ndarray}\n Returns\n -------\n dict {kper:recarray}\n '
assert isinstance(m4ds, dict)
for (name, m4d) in m4ds.items():
assert isinstance(m4d, np.ndarray)
assert (name in dtype.names)
assert (m4d.ndim == 4)
keys = list(m4ds.keys())
for (i1, key1) in enumerate(keys):
a1 = np.isnan(m4ds[key1])
for (i2, key2) in enumerate(keys[i1:]):
a2 = np.isnan(m4ds[key2])
if (not np.array_equal(a1, a2)):
raise Exception(('Transient2d error: masking not equal' + ' for {0} and {1}'.format(key1, key2)))
sp_data = {}
for kper in range(m4d.shape[0]):
vals = {}
for (name, m4d) in m4ds.items():
arr = m4d[kper, :, :, :]
isnan = np.argwhere((~ np.isnan(arr)))
v = []
for (k, i, j) in isnan:
v.append(arr[(k, i, j)])
vals[name] = v
kk = isnan[:, 0]
ii = isnan[:, 1]
jj = isnan[:, 2]
spd = np.recarray(shape=isnan.shape[0], dtype=dtype)
spd['i'] = ii
spd['k'] = kk
spd['j'] = jj
for (n, v) in vals.items():
spd[n] = v
sp_data[kper] = spd
return sp_data | 1,632,529,002,862,806,500 | convert a dictionary of 4-dim masked arrays to
a stress_period_data style dict of recarray
Parameters
----------
dtype : numpy dtype
m4ds : dict {name:masked numpy 4-dim ndarray}
Returns
-------
dict {kper:recarray} | flopy/utils/util_list.py | masked4D_arrays_to_stress_period_data | aleaf/flopy | python | @staticmethod
def masked4D_arrays_to_stress_period_data(dtype, m4ds):
' convert a dictionary of 4-dim masked arrays to\n a stress_period_data style dict of recarray\n Parameters\n ----------\n dtype : numpy dtype\n\n m4ds : dict {name:masked numpy 4-dim ndarray}\n Returns\n -------\n dict {kper:recarray}\n '
assert isinstance(m4ds, dict)
for (name, m4d) in m4ds.items():
assert isinstance(m4d, np.ndarray)
assert (name in dtype.names)
assert (m4d.ndim == 4)
keys = list(m4ds.keys())
for (i1, key1) in enumerate(keys):
a1 = np.isnan(m4ds[key1])
for (i2, key2) in enumerate(keys[i1:]):
a2 = np.isnan(m4ds[key2])
if (not np.array_equal(a1, a2)):
raise Exception(('Transient2d error: masking not equal' + ' for {0} and {1}'.format(key1, key2)))
sp_data = {}
for kper in range(m4d.shape[0]):
vals = {}
for (name, m4d) in m4ds.items():
arr = m4d[kper, :, :, :]
isnan = np.argwhere((~ np.isnan(arr)))
v = []
for (k, i, j) in isnan:
v.append(arr[(k, i, j)])
vals[name] = v
kk = isnan[:, 0]
ii = isnan[:, 1]
jj = isnan[:, 2]
spd = np.recarray(shape=isnan.shape[0], dtype=dtype)
spd['i'] = ii
spd['k'] = kk
spd['j'] = jj
for (n, v) in vals.items():
spd[n] = v
sp_data[kper] = spd
return sp_data |
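A small, self-contained sketch of the masked-4D conversion described above; the dtype, the `flux` field name, and the array values are assumptions chosen for illustration.

```python
# Hedged sketch: requires flopy and numpy; dtype and values are illustrative.
import numpy as np
from flopy.utils.util_list import MfList

dtype = np.dtype([("k", int), ("i", int), ("j", int), ("flux", np.float32)])

# 2 stress periods, 1 layer, 3 rows, 3 columns; NaN marks cells with no entry.
flux = np.full((2, 1, 3, 3), np.nan)
flux[0, 0, 1, 1] = -100.0
flux[1, 0, 1, 1] = -150.0

spd = MfList.masked4D_arrays_to_stress_period_data(dtype, {"flux": flux})
print(spd[0])  # recarray with a single (k, i, j, flux) record for kper 0
print(spd[1])
```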
@main_app.route('/api')
def swagger():
'\n Responds with the OpenAPI specification for this application.\n '
return jsonify(spec.to_dict()) | -3,434,190,599,110,189,000 | Responds with the OpenAPI specification for this application. | flask_service/views.py | swagger | mwprog/atomist-flask-microservice | python | @main_app.route('/api')
def swagger():
'\n \n '
return jsonify(spec.to_dict()) |
@main_app.route('/health')
def health():
"\n Responds with the current's service health.\n\n Could be used by the liveness probe of a Kubernetes cluster for instance.\n "
return '' | -8,312,535,159,261,387,000 | Responds with the current's service health.
Could be used by the liveness probe of a Kubernetes cluster for instance. | flask_service/views.py | health | mwprog/atomist-flask-microservice | python | @main_app.route('/health')
def health():
"\n Responds with the current's service health.\n\n Could be used by the liveness probe of a Kubernetes cluster for instance.\n "
return |
@main_app.route('/status')
def status():
"\n Responds with the current's service status.\n\n Could be used by the readiness probe of a Kubernetes cluster.\n "
return '' | -83,078,368,568,048,130 | Responds with the current's service status.
Could be used by the readiness probe of a Kubernetes cluster. | flask_service/views.py | status | mwprog/atomist-flask-microservice | python | @main_app.route('/status')
def status():
"\n Responds with the current's service status.\n\n Could be used by the readiness probe of a Kubernetes cluster.\n "
return |
def get_covariance(self):
'Compute data covariance with the generative model.\n\n ``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``\n where S**2 contains the explained variances, and sigma2 contains the\n noise variances.\n\n Returns\n -------\n cov : array of shape=(n_features, n_features)\n Estimated covariance of data.\n '
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = (components_ * np.sqrt(exp_var[:, np.newaxis]))
exp_var_diff = np.maximum((exp_var - self.noise_variance_), 0.0)
cov = np.dot((components_.T * exp_var_diff), components_)
cov.flat[::(len(cov) + 1)] += self.noise_variance_
return cov | 6,392,993,480,560,589,000 | Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances, and sigma2 contains the
noise variances.
Returns
-------
cov : array of shape=(n_features, n_features)
Estimated covariance of data. | sklearn/decomposition/_base.py | get_covariance | 40104/Scikit-Learn | python | def get_covariance(self):
'Compute data covariance with the generative model.\n\n ``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``\n where S**2 contains the explained variances, and sigma2 contains the\n noise variances.\n\n Returns\n -------\n cov : array of shape=(n_features, n_features)\n Estimated covariance of data.\n '
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = (components_ * np.sqrt(exp_var[:, np.newaxis]))
exp_var_diff = np.maximum((exp_var - self.noise_variance_), 0.0)
cov = np.dot((components_.T * exp_var_diff), components_)
cov.flat[::(len(cov) + 1)] += self.noise_variance_
return cov |
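A short numerical check of the covariance reconstruction in the row above. Note that the implementation subtracts the noise variance from the explained variances before the outer product; the sketch mirrors that arithmetic, with random data as a stand-in.

```python
# Hedged sketch: requires numpy and scikit-learn; data are random placeholders.
import numpy as np
from sklearn.decomposition import PCA

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 5))

pca = PCA(n_components=2).fit(X)
W = pca.components_                       # shape (2, 5)
exp_var = pca.explained_variance_         # shape (2,)
sigma2 = pca.noise_variance_

# Same arithmetic as the method above:
# W.T @ diag(max(exp_var - sigma2, 0)) @ W, plus sigma2 on the diagonal.
manual = (W.T * np.maximum(exp_var - sigma2, 0.0)) @ W + sigma2 * np.eye(5)
print(np.allclose(manual, pca.get_covariance()))  # expected: True
```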
def get_precision(self):
'Compute data precision matrix with the generative model.\n\n Equals the inverse of the covariance but computed with\n the matrix inversion lemma for efficiency.\n\n Returns\n -------\n precision : array, shape=(n_features, n_features)\n Estimated precision of data.\n '
n_features = self.components_.shape[1]
if (self.n_components_ == 0):
return (np.eye(n_features) / self.noise_variance_)
if (self.n_components_ == n_features):
return linalg.inv(self.get_covariance())
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = (components_ * np.sqrt(exp_var[:, np.newaxis]))
exp_var_diff = np.maximum((exp_var - self.noise_variance_), 0.0)
precision = (np.dot(components_, components_.T) / self.noise_variance_)
precision.flat[::(len(precision) + 1)] += (1.0 / exp_var_diff)
precision = np.dot(components_.T, np.dot(linalg.inv(precision), components_))
precision /= (- (self.noise_variance_ ** 2))
precision.flat[::(len(precision) + 1)] += (1.0 / self.noise_variance_)
return precision | 316,780,617,108,224,700 | Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data. | sklearn/decomposition/_base.py | get_precision | 40104/Scikit-Learn | python | def get_precision(self):
'Compute data precision matrix with the generative model.\n\n Equals the inverse of the covariance but computed with\n the matrix inversion lemma for efficiency.\n\n Returns\n -------\n precision : array, shape=(n_features, n_features)\n Estimated precision of data.\n '
n_features = self.components_.shape[1]
if (self.n_components_ == 0):
return (np.eye(n_features) / self.noise_variance_)
if (self.n_components_ == n_features):
return linalg.inv(self.get_covariance())
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = (components_ * np.sqrt(exp_var[:, np.newaxis]))
exp_var_diff = np.maximum((exp_var - self.noise_variance_), 0.0)
precision = (np.dot(components_, components_.T) / self.noise_variance_)
precision.flat[::(len(precision) + 1)] += (1.0 / exp_var_diff)
precision = np.dot(components_.T, np.dot(linalg.inv(precision), components_))
precision /= (- (self.noise_variance_ ** 2))
precision.flat[::(len(precision) + 1)] += (1.0 / self.noise_variance_)
return precision |
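The precision computation above uses the matrix inversion lemma to avoid inverting the full covariance directly; a quick check (random data, illustrative sizes) that it agrees with the direct inverse:

```python
# Hedged sketch: requires numpy and scikit-learn; data are random placeholders.
import numpy as np
from sklearn.decomposition import PCA

rng = np.random.default_rng(1)
X = rng.normal(size=(300, 6))

pca = PCA(n_components=3).fit(X)
direct = np.linalg.inv(pca.get_covariance())     # plain O(n_features**3) inverse
print(np.allclose(direct, pca.get_precision()))  # expected: True
```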
@abstractmethod
def fit(self, X, y=None):
'Placeholder for fit. Subclasses should implement this method!\n\n Fit the model with X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\n Returns\n -------\n self : object\n Returns the instance itself.\n ' | -3,515,658,082,423,659,500 | Placeholder for fit. Subclasses should implement this method!
Fit the model with X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples and
`n_features` is the number of features.
Returns
-------
self : object
Returns the instance itself. | sklearn/decomposition/_base.py | fit | 40104/Scikit-Learn | python | @abstractmethod
def fit(self, X, y=None):
'Placeholder for fit. Subclasses should implement this method!\n\n Fit the model with X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\n Returns\n -------\n self : object\n Returns the instance itself.\n ' |
def transform(self, X):
'Apply dimensionality reduction to X.\n\n X is projected on the first principal components previously extracted\n from a training set.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n New data, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n Returns\n -------\n X_new : array-like of shape (n_samples, n_components)\n Projection of X in the first principal components, where `n_samples`\n is the number of samples and `n_components` is the number of the components.\n '
check_is_fitted(self)
X = self._validate_data(X, dtype=[np.float64, np.float32], reset=False)
if (self.mean_ is not None):
X = (X - self.mean_)
X_transformed = np.dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed | 6,729,435,418,467,268,000 | Apply dimensionality reduction to X.
X is projected on the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
New data, where `n_samples` is the number of samples
and `n_features` is the number of features.
Returns
-------
X_new : array-like of shape (n_samples, n_components)
Projection of X in the first principal components, where `n_samples`
is the number of samples and `n_components` is the number of the components. | sklearn/decomposition/_base.py | transform | 40104/Scikit-Learn | python | def transform(self, X):
'Apply dimensionality reduction to X.\n\n X is projected on the first principal components previously extracted\n from a training set.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n New data, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n Returns\n -------\n X_new : array-like of shape (n_samples, n_components)\n Projection of X in the first principal components, where `n_samples`\n is the number of samples and `n_components` is the number of the components.\n '
check_is_fitted(self)
X = self._validate_data(X, dtype=[np.float64, np.float32], reset=False)
if (self.mean_ is not None):
X = (X - self.mean_)
X_transformed = np.dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed |
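A small illustration of the whitening branch in `transform` above: after division by the square root of the explained variances, each retained component has roughly unit variance. The anisotropic scaling of the random data is an arbitrary choice for the demonstration.

```python
# Hedged sketch: requires numpy and scikit-learn; data are random placeholders.
import numpy as np
from sklearn.decomposition import PCA

rng = np.random.default_rng(2)
X = rng.normal(size=(500, 3)) * np.array([5.0, 2.0, 0.5])  # very unequal scales

pca = PCA(n_components=3, whiten=True).fit(X)
Z = pca.transform(X)
print(np.round(Z.var(axis=0, ddof=1), 2))  # each component close to 1.0
```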
def inverse_transform(self, X):
'Transform data back to its original space.\n\n In other words, return an input `X_original` whose transform would be X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_components)\n New data, where `n_samples` is the number of samples\n and `n_components` is the number of components.\n\n Returns\n -------\n X_original array-like of shape (n_samples, n_features)\n Original data, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n Notes\n -----\n If whitening is enabled, inverse_transform will compute the\n exact inverse operation, which includes reversing whitening.\n '
if self.whiten:
return (np.dot(X, (np.sqrt(self.explained_variance_[:, np.newaxis]) * self.components_)) + self.mean_)
else:
return (np.dot(X, self.components_) + self.mean_) | -4,156,797,171,178,644,500 | Transform data back to its original space.
In other words, return an input `X_original` whose transform would be X.
Parameters
----------
X : array-like of shape (n_samples, n_components)
New data, where `n_samples` is the number of samples
and `n_components` is the number of components.
Returns
-------
X_original array-like of shape (n_samples, n_features)
Original data, where `n_samples` is the number of samples
and `n_features` is the number of features.
Notes
-----
If whitening is enabled, inverse_transform will compute the
exact inverse operation, which includes reversing whitening. | sklearn/decomposition/_base.py | inverse_transform | 40104/Scikit-Learn | python | def inverse_transform(self, X):
'Transform data back to its original space.\n\n In other words, return an input `X_original` whose transform would be X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_components)\n New data, where `n_samples` is the number of samples\n and `n_components` is the number of components.\n\n Returns\n -------\n X_original array-like of shape (n_samples, n_features)\n Original data, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n Notes\n -----\n If whitening is enabled, inverse_transform will compute the\n exact inverse operation, which includes reversing whitening.\n '
if self.whiten:
return (np.dot(X, (np.sqrt(self.explained_variance_[:, np.newaxis]) * self.components_)) + self.mean_)
else:
return (np.dot(X, self.components_) + self.mean_) |
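And a round trip through `transform`/`inverse_transform`: lossless when all components are kept, approximate when some are dropped. Data are again random placeholders.

```python
# Hedged sketch: requires numpy and scikit-learn; data are random placeholders.
import numpy as np
from sklearn.decomposition import PCA

rng = np.random.default_rng(3)
X = rng.normal(size=(100, 4))

full = PCA(n_components=4).fit(X)   # keep every component: exact inverse
print(np.allclose(X, full.inverse_transform(full.transform(X))))  # True

lossy = PCA(n_components=2).fit(X)  # drop components: nonzero reconstruction error
err = np.mean((X - lossy.inverse_transform(lossy.transform(X))) ** 2)
print(err > 0)                      # True
```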
@property
def _n_features_out(self):
'Number of transformed output features.'
return self.components_.shape[0] | -7,760,010,724,038,969,000 | Number of transformed output features. | sklearn/decomposition/_base.py | _n_features_out | 40104/Scikit-Learn | python | @property
def _n_features_out(self):
return self.components_.shape[0] |
async def broadcast_avatar_position(room_channel_name, channel_name, json_data):
"\n Sends the new avatar's position to the users of the room.\n "
type = json_data['type']
payload = json_data['payload']
position = payload['position']
animate = payload['animate']
participant = (await sync_to_async(get_participant)(room_channel_name, channel_name))
participant_id = (await sync_to_async(get_participant_id)(participant))
def set_participant_position():
participant.x = position['x']
participant.y = position['y']
participant.direction_x = position['directionX']
participant.save()
(await sync_to_async(set_participant_position)())
(await channel_layer.group_send(room_channel_name, {'type': type, 'payload': {'participant_id': participant_id, 'position': position, 'animate': animate}})) | -7,486,539,441,214,262,000 | Sends the new avatar's position to the users of the room. | server/websockets/consumers/world/broadcasts/avatar.py | broadcast_avatar_position | Shadowsych/html5-msoy | python | async def broadcast_avatar_position(room_channel_name, channel_name, json_data):
"\n \n "
type = json_data['type']
payload = json_data['payload']
position = payload['position']
animate = payload['animate']
participant = (await sync_to_async(get_participant)(room_channel_name, channel_name))
participant_id = (await sync_to_async(get_participant_id)(participant))
def set_participant_position():
participant.x = position['x']
participant.y = position['y']
participant.direction_x = position['directionX']
participant.save()
(await sync_to_async(set_participant_position)())
(await channel_layer.group_send(room_channel_name, {'type': type, 'payload': {'participant_id': participant_id, 'position': position, 'animate': animate}})) |
async def broadcast_avatar_state(room_channel_name, channel_name, json_data):
"\n Sends the new avatar's state to the users of the room.\n "
type = json_data['type']
payload = json_data['payload']
state = payload['value']
participant = (await sync_to_async(get_participant)(room_channel_name, channel_name))
participant_id = (await sync_to_async(get_participant_id)(participant))
(await channel_layer.group_send(room_channel_name, {'type': humps.decamelize(type), 'payload': {'participant_id': participant_id, 'state': state}})) | -5,717,356,674,393,249,000 | Sends the new avatar's state to the users of the room. | server/websockets/consumers/world/broadcasts/avatar.py | broadcast_avatar_state | Shadowsych/html5-msoy | python | async def broadcast_avatar_state(room_channel_name, channel_name, json_data):
"\n \n "
type = json_data['type']
payload = json_data['payload']
state = payload['value']
participant = (await sync_to_async(get_participant)(room_channel_name, channel_name))
participant_id = (await sync_to_async(get_participant_id)(participant))
(await channel_layer.group_send(room_channel_name, {'type': humps.decamelize(type), 'payload': {'participant_id': participant_id, 'state': state}})) |
def height(root):
'\n DFS\n\n v = Vertices\n e = Edges\n d = Depth\n\n Time complexity: O(v + e)\n Space complexity: O(d)\n '
if root:
return (1 + max(height(root.left), height(root.right)))
else:
return (- 1) | -1,252,895,384,553,899,300 | DFS
v = Vertices
e = Edges
d = Depth
Time complexity: O(v + e)
Space complexity: O(d) | HackerRank/Data Structures/Trees/height-of-a-binary-tree.py | height | danielfsousa/algorithms-solutions | python | def height(root):
'\n DFS\n\n v = Vertices\n e = Edges\n d = Depth\n\n Time complexity: O(v + e)\n Space complexity: O(d)\n '
if root:
return (1 + max(height(root.left), height(root.right)))
else:
return (- 1) |
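The tree `Node` class is not part of the row above, so the sketch below supplies a minimal assumed one to exercise the recurrence (a single leaf has height 0, an empty tree height -1).

```python
# Hedged sketch: Node is an assumed minimal class, not defined in the row above.
class Node:
    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None

def height(root):
    # Depth-first recursion, as in the solution above.
    if root:
        return 1 + max(height(root.left), height(root.right))
    return -1

root = Node(3)
root.left = Node(1)
root.right = Node(7)
root.right.right = Node(9)

print(height(None))  # -1: empty tree
print(height(root))  # 2: root -> 7 -> 9
```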
def set_random_seed(seed: Optional[int]=None) -> None:
'Set random seed for random, numpy, and pytorch.\n\n Args:\n seed: The random seed, defaults to `None` which select it randomly.\n '
max_value = np.iinfo(np.uint32).max
min_value = np.iinfo(np.uint32).min
try:
seed = int(seed)
logger.info(f'Set random seed to {seed}.')
except (TypeError, ValueError):
seed = random.randint(min_value, max_value)
logger.info(f'No random seed specified, randomly set random seed to {seed}.')
if (not (min_value <= seed <= max_value)):
new_seed = random.randint(min_value, max_value)
logger.info(f'Random seed {seed} is not valid, randomly set random seed to {new_seed}.')
seed = new_seed
random.seed(seed)
np.random.seed(seed=seed)
torch.manual_seed(seed) | -7,442,857,920,851,555,000 | Set random seed for random, numpy, and pytorch.
Args:
seed: The random seed, defaults to `None` which select it randomly. | src/emmental/utils/seed.py | set_random_seed | KeAWang/emmental | python | def set_random_seed(seed: Optional[int]=None) -> None:
'Set random seed for random, numpy, and pytorch.\n\n Args:\n seed: The random seed, defaults to `None` which select it randomly.\n '
max_value = np.iinfo(np.uint32).max
min_value = np.iinfo(np.uint32).min
try:
seed = int(seed)
logger.info(f'Set random seed to {seed}.')
except (TypeError, ValueError):
seed = random.randint(min_value, max_value)
logger.info(f'No random seed specified, randomly set random seed to {seed}.')
if (not (min_value <= seed <= max_value)):
new_seed = random.randint(min_value, max_value)
logger.info(f'Random seed {seed} is not valid, randomly set random seed to {new_seed}.')
seed = new_seed
random.seed(seed)
np.random.seed(seed=seed)
torch.manual_seed(seed) |
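A quick reproducibility check for `set_random_seed`; the import path follows the `path` column of the row and assumes emmental, numpy, and torch are installed.

```python
# Hedged sketch: assumes emmental, numpy, and torch are available.
import numpy as np
import torch
from emmental.utils.seed import set_random_seed

set_random_seed(1234)
first = (np.random.rand(), torch.rand(1).item())

set_random_seed(1234)
second = (np.random.rand(), torch.rand(1).item())

print(first == second)  # expected: True -- identical draws after re-seeding
```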
def setUp(self):
'Setup'
super(TestName, self).setUp()
self.collection.register(Name())
self.success_templates = ['fixtures/templates/good/outputs/name.yaml'] | 3,146,337,064,199,645,700 | Setup | test/rules/outputs/test_name.py | setUp | SanderKnape/cfn-python-lint | python | def setUp(self):
super(TestName, self).setUp()
self.collection.register(Name())
self.success_templates = ['fixtures/templates/good/outputs/name.yaml'] |
def test_file_positive(self):
'Test Positive'
self.helper_file_positive() | -1,556,978,985,838,885,400 | Test Positive | test/rules/outputs/test_name.py | test_file_positive | SanderKnape/cfn-python-lint | python | def test_file_positive(self):
self.helper_file_positive() |
def test_file_negative(self):
'Test failure'
self.helper_file_negative('fixtures/templates/bad/outputs/name.yaml', 1) | -4,366,943,575,606,948,000 | Test failure | test/rules/outputs/test_name.py | test_file_negative | SanderKnape/cfn-python-lint | python | def test_file_negative(self):
self.helper_file_negative('fixtures/templates/bad/outputs/name.yaml', 1) |
def _infer_state_dtype(explicit_dtype, state):
"Infer the dtype of an RNN state.\n\n Args:\n explicit_dtype: explicitly declared dtype or None.\n state: RNN's hidden state. Must be a Tensor or a nested iterable containing\n Tensors.\n\n Returns:\n dtype: inferred dtype of hidden state.\n\n Raises:\n ValueError: if `state` has heterogeneous dtypes or is empty.\n "
if (explicit_dtype is not None):
return explicit_dtype
elif nest.is_sequence(state):
inferred_dtypes = [element.dtype for element in nest.flatten(state)]
if (not inferred_dtypes):
raise ValueError('Unable to infer dtype from empty state.')
all_same = all([(x == inferred_dtypes[0]) for x in inferred_dtypes])
if (not all_same):
raise ValueError('State has tensors of different inferred_dtypes. Unable to infer a single representative dtype.')
return inferred_dtypes[0]
else:
return state.dtype | -5,892,994,636,942,259,000 | Infer the dtype of an RNN state.
Args:
explicit_dtype: explicitly declared dtype or None.
state: RNN's hidden state. Must be a Tensor or a nested iterable containing
Tensors.
Returns:
dtype: inferred dtype of hidden state.
Raises:
ValueError: if `state` has heterogeneous dtypes or is empty. | tensorflow/python/ops/rnn.py | _infer_state_dtype | gameover27/hiptensorflow | python | def _infer_state_dtype(explicit_dtype, state):
"Infer the dtype of an RNN state.\n\n Args:\n explicit_dtype: explicitly declared dtype or None.\n state: RNN's hidden state. Must be a Tensor or a nested iterable containing\n Tensors.\n\n Returns:\n dtype: inferred dtype of hidden state.\n\n Raises:\n ValueError: if `state` has heterogeneous dtypes or is empty.\n "
if (explicit_dtype is not None):
return explicit_dtype
elif nest.is_sequence(state):
inferred_dtypes = [element.dtype for element in nest.flatten(state)]
if (not inferred_dtypes):
raise ValueError('Unable to infer dtype from empty state.')
all_same = all([(x == inferred_dtypes[0]) for x in inferred_dtypes])
if (not all_same):
raise ValueError('State has tensors of different inferred_dtypes. Unable to infer a single representative dtype.')
return inferred_dtypes[0]
else:
return state.dtype |
def _on_device(fn, device):
"Build the subgraph defined by lambda `fn` on `device` if it's not None."
if device:
with ops.device(device):
return fn()
else:
return fn() | -2,863,435,495,451,946,000 | Build the subgraph defined by lambda `fn` on `device` if it's not None. | tensorflow/python/ops/rnn.py | _on_device | gameover27/hiptensorflow | python | def _on_device(fn, device):
if device:
with ops.device(device):
return fn()
else:
return fn() |
def _rnn_step(time, sequence_length, min_sequence_length, max_sequence_length, zero_output, state, call_cell, state_size, skip_conditionals=False):
"Calculate one step of a dynamic RNN minibatch.\n\n Returns an (output, state) pair conditioned on the sequence_lengths.\n When skip_conditionals=False, the pseudocode is something like:\n\n if t >= max_sequence_length:\n return (zero_output, state)\n if t < min_sequence_length:\n return call_cell()\n\n # Selectively output zeros or output, old state or new state depending\n # on if we've finished calculating each row.\n new_output, new_state = call_cell()\n final_output = np.vstack([\n zero_output if time >= sequence_lengths[r] else new_output_r\n for r, new_output_r in enumerate(new_output)\n ])\n final_state = np.vstack([\n state[r] if time >= sequence_lengths[r] else new_state_r\n for r, new_state_r in enumerate(new_state)\n ])\n return (final_output, final_state)\n\n Args:\n time: Python int, the current time step\n sequence_length: int32 `Tensor` vector of size [batch_size]\n min_sequence_length: int32 `Tensor` scalar, min of sequence_length\n max_sequence_length: int32 `Tensor` scalar, max of sequence_length\n zero_output: `Tensor` vector of shape [output_size]\n state: Either a single `Tensor` matrix of shape `[batch_size, state_size]`,\n or a list/tuple of such tensors.\n call_cell: lambda returning tuple of (new_output, new_state) where\n new_output is a `Tensor` matrix of shape `[batch_size, output_size]`.\n new_state is a `Tensor` matrix of shape `[batch_size, state_size]`.\n state_size: The `cell.state_size` associated with the state.\n skip_conditionals: Python bool, whether to skip using the conditional\n calculations. This is useful for `dynamic_rnn`, where the input tensor\n matches `max_sequence_length`, and using conditionals just slows\n everything down.\n\n Returns:\n A tuple of (`final_output`, `final_state`) as given by the pseudocode above:\n final_output is a `Tensor` matrix of shape [batch_size, output_size]\n final_state is either a single `Tensor` matrix, or a tuple of such\n matrices (matching length and shapes of input `state`).\n\n Raises:\n ValueError: If the cell returns a state tuple whose length does not match\n that returned by `state_size`.\n "
flat_state = nest.flatten(state)
flat_zero_output = nest.flatten(zero_output)
def _copy_one_through(output, new_output):
copy_cond = (time >= sequence_length)
return _on_device((lambda : array_ops.where(copy_cond, output, new_output)), device=new_output.op.device)
def _copy_some_through(flat_new_output, flat_new_state):
flat_new_output = [_copy_one_through(zero_output, new_output) for (zero_output, new_output) in zip(flat_zero_output, flat_new_output)]
flat_new_state = [_copy_one_through(state, new_state) for (state, new_state) in zip(flat_state, flat_new_state)]
return (flat_new_output + flat_new_state)
def _maybe_copy_some_through():
'Run RNN step. Pass through either no or some past state.'
(new_output, new_state) = call_cell()
nest.assert_same_structure(state, new_state)
flat_new_state = nest.flatten(new_state)
flat_new_output = nest.flatten(new_output)
return control_flow_ops.cond((time < min_sequence_length), (lambda : (flat_new_output + flat_new_state)), (lambda : _copy_some_through(flat_new_output, flat_new_state)))
if skip_conditionals:
(new_output, new_state) = call_cell()
nest.assert_same_structure(state, new_state)
new_state = nest.flatten(new_state)
new_output = nest.flatten(new_output)
final_output_and_state = _copy_some_through(new_output, new_state)
else:
empty_update = (lambda : (flat_zero_output + flat_state))
final_output_and_state = control_flow_ops.cond((time >= max_sequence_length), empty_update, _maybe_copy_some_through)
if (len(final_output_and_state) != (len(flat_zero_output) + len(flat_state))):
raise ValueError('Internal error: state and output were not concatenated correctly.')
final_output = final_output_and_state[:len(flat_zero_output)]
final_state = final_output_and_state[len(flat_zero_output):]
for (output, flat_output) in zip(final_output, flat_zero_output):
output.set_shape(flat_output.get_shape())
for (substate, flat_substate) in zip(final_state, flat_state):
substate.set_shape(flat_substate.get_shape())
final_output = nest.pack_sequence_as(structure=zero_output, flat_sequence=final_output)
final_state = nest.pack_sequence_as(structure=state, flat_sequence=final_state)
return (final_output, final_state) | -261,439,537,545,024,900 | Calculate one step of a dynamic RNN minibatch.
Returns an (output, state) pair conditioned on the sequence_lengths.
When skip_conditionals=False, the pseudocode is something like:
if t >= max_sequence_length:
return (zero_output, state)
if t < min_sequence_length:
return call_cell()
# Selectively output zeros or output, old state or new state depending
# on if we've finished calculating each row.
new_output, new_state = call_cell()
final_output = np.vstack([
zero_output if time >= sequence_lengths[r] else new_output_r
for r, new_output_r in enumerate(new_output)
])
final_state = np.vstack([
state[r] if time >= sequence_lengths[r] else new_state_r
for r, new_state_r in enumerate(new_state)
])
return (final_output, final_state)
Args:
time: Python int, the current time step
sequence_length: int32 `Tensor` vector of size [batch_size]
min_sequence_length: int32 `Tensor` scalar, min of sequence_length
max_sequence_length: int32 `Tensor` scalar, max of sequence_length
zero_output: `Tensor` vector of shape [output_size]
state: Either a single `Tensor` matrix of shape `[batch_size, state_size]`,
or a list/tuple of such tensors.
call_cell: lambda returning tuple of (new_output, new_state) where
new_output is a `Tensor` matrix of shape `[batch_size, output_size]`.
new_state is a `Tensor` matrix of shape `[batch_size, state_size]`.
state_size: The `cell.state_size` associated with the state.
skip_conditionals: Python bool, whether to skip using the conditional
calculations. This is useful for `dynamic_rnn`, where the input tensor
matches `max_sequence_length`, and using conditionals just slows
everything down.
Returns:
A tuple of (`final_output`, `final_state`) as given by the pseudocode above:
final_output is a `Tensor` matrix of shape [batch_size, output_size]
final_state is either a single `Tensor` matrix, or a tuple of such
matrices (matching length and shapes of input `state`).
Raises:
ValueError: If the cell returns a state tuple whose length does not match
that returned by `state_size`. | tensorflow/python/ops/rnn.py | _rnn_step | gameover27/hiptensorflow | python | def _rnn_step(time, sequence_length, min_sequence_length, max_sequence_length, zero_output, state, call_cell, state_size, skip_conditionals=False):
"Calculate one step of a dynamic RNN minibatch.\n\n Returns an (output, state) pair conditioned on the sequence_lengths.\n When skip_conditionals=False, the pseudocode is something like:\n\n if t >= max_sequence_length:\n return (zero_output, state)\n if t < min_sequence_length:\n return call_cell()\n\n # Selectively output zeros or output, old state or new state depending\n # on if we've finished calculating each row.\n new_output, new_state = call_cell()\n final_output = np.vstack([\n zero_output if time >= sequence_lengths[r] else new_output_r\n for r, new_output_r in enumerate(new_output)\n ])\n final_state = np.vstack([\n state[r] if time >= sequence_lengths[r] else new_state_r\n for r, new_state_r in enumerate(new_state)\n ])\n return (final_output, final_state)\n\n Args:\n time: Python int, the current time step\n sequence_length: int32 `Tensor` vector of size [batch_size]\n min_sequence_length: int32 `Tensor` scalar, min of sequence_length\n max_sequence_length: int32 `Tensor` scalar, max of sequence_length\n zero_output: `Tensor` vector of shape [output_size]\n state: Either a single `Tensor` matrix of shape `[batch_size, state_size]`,\n or a list/tuple of such tensors.\n call_cell: lambda returning tuple of (new_output, new_state) where\n new_output is a `Tensor` matrix of shape `[batch_size, output_size]`.\n new_state is a `Tensor` matrix of shape `[batch_size, state_size]`.\n state_size: The `cell.state_size` associated with the state.\n skip_conditionals: Python bool, whether to skip using the conditional\n calculations. This is useful for `dynamic_rnn`, where the input tensor\n matches `max_sequence_length`, and using conditionals just slows\n everything down.\n\n Returns:\n A tuple of (`final_output`, `final_state`) as given by the pseudocode above:\n final_output is a `Tensor` matrix of shape [batch_size, output_size]\n final_state is either a single `Tensor` matrix, or a tuple of such\n matrices (matching length and shapes of input `state`).\n\n Raises:\n ValueError: If the cell returns a state tuple whose length does not match\n that returned by `state_size`.\n "
flat_state = nest.flatten(state)
flat_zero_output = nest.flatten(zero_output)
def _copy_one_through(output, new_output):
copy_cond = (time >= sequence_length)
return _on_device((lambda : array_ops.where(copy_cond, output, new_output)), device=new_output.op.device)
def _copy_some_through(flat_new_output, flat_new_state):
flat_new_output = [_copy_one_through(zero_output, new_output) for (zero_output, new_output) in zip(flat_zero_output, flat_new_output)]
flat_new_state = [_copy_one_through(state, new_state) for (state, new_state) in zip(flat_state, flat_new_state)]
return (flat_new_output + flat_new_state)
def _maybe_copy_some_through():
'Run RNN step. Pass through either no or some past state.'
(new_output, new_state) = call_cell()
nest.assert_same_structure(state, new_state)
flat_new_state = nest.flatten(new_state)
flat_new_output = nest.flatten(new_output)
return control_flow_ops.cond((time < min_sequence_length), (lambda : (flat_new_output + flat_new_state)), (lambda : _copy_some_through(flat_new_output, flat_new_state)))
if skip_conditionals:
(new_output, new_state) = call_cell()
nest.assert_same_structure(state, new_state)
new_state = nest.flatten(new_state)
new_output = nest.flatten(new_output)
final_output_and_state = _copy_some_through(new_output, new_state)
else:
empty_update = (lambda : (flat_zero_output + flat_state))
final_output_and_state = control_flow_ops.cond((time >= max_sequence_length), empty_update, _maybe_copy_some_through)
if (len(final_output_and_state) != (len(flat_zero_output) + len(flat_state))):
raise ValueError('Internal error: state and output were not concatenated correctly.')
final_output = final_output_and_state[:len(flat_zero_output)]
final_state = final_output_and_state[len(flat_zero_output):]
for (output, flat_output) in zip(final_output, flat_zero_output):
output.set_shape(flat_output.get_shape())
for (substate, flat_substate) in zip(final_state, flat_state):
substate.set_shape(flat_substate.get_shape())
final_output = nest.pack_sequence_as(structure=zero_output, flat_sequence=final_output)
final_state = nest.pack_sequence_as(structure=state, flat_sequence=final_state)
return (final_output, final_state) |
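The docstring above already gives the copy-through rule as pseudocode; the numpy sketch below (shapes and values invented for illustration) applies the same selection to a batch of two sequences.

```python
# Hedged numpy illustration of the copy-through rule from the pseudocode above.
import numpy as np

def copy_some_through(time, seq_len, zero_output, old_state, new_output, new_state):
    ended = time >= seq_len                              # (batch,) bool
    out = np.where(ended[:, None], zero_output, new_output)
    state = np.where(ended[:, None], old_state, new_state)
    return out, state

seq_len = np.array([3, 1])                       # sequence 1 ends after one step
zero_output = np.zeros((2, 2))
old_state = np.array([[0.1, 0.1], [0.2, 0.2]])
new_output = np.array([[1.0, 1.0], [2.0, 2.0]])
new_state = np.array([[9.0, 9.0], [8.0, 8.0]])

out, state = copy_some_through(1, seq_len, zero_output, old_state,
                               new_output, new_state)
print(out)    # row 0 takes the new output, row 1 is zeroed out
print(state)  # row 0 takes the new state, row 1 keeps its old state
```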
def _reverse_seq(input_seq, lengths):
'Reverse a list of Tensors up to specified lengths.\n\n Args:\n input_seq: Sequence of seq_len tensors of dimension (batch_size, n_features)\n or nested tuples of tensors.\n lengths: A `Tensor` of dimension batch_size, containing lengths for each\n sequence in the batch. If "None" is specified, simply reverses\n the list.\n\n Returns:\n time-reversed sequence\n '
if (lengths is None):
return list(reversed(input_seq))
flat_input_seq = tuple((nest.flatten(input_) for input_ in input_seq))
flat_results = [[] for _ in range(len(input_seq))]
for sequence in zip(*flat_input_seq):
input_shape = tensor_shape.unknown_shape(ndims=sequence[0].get_shape().ndims)
for input_ in sequence:
input_shape.merge_with(input_.get_shape())
input_.set_shape(input_shape)
s_joined = array_ops.stack(sequence)
if (lengths is not None):
lengths = math_ops.to_int64(lengths)
s_reversed = array_ops.reverse_sequence(s_joined, lengths, 0, 1)
result = array_ops.unstack(s_reversed)
for (r, flat_result) in zip(result, flat_results):
r.set_shape(input_shape)
flat_result.append(r)
results = [nest.pack_sequence_as(structure=input_, flat_sequence=flat_result) for (input_, flat_result) in zip(input_seq, flat_results)]
return results | 9,143,443,463,951,144,000 | Reverse a list of Tensors up to specified lengths.
Args:
input_seq: Sequence of seq_len tensors of dimension (batch_size, n_features)
or nested tuples of tensors.
lengths: A `Tensor` of dimension batch_size, containing lengths for each
sequence in the batch. If "None" is specified, simply reverses
the list.
Returns:
time-reversed sequence | tensorflow/python/ops/rnn.py | _reverse_seq | gameover27/hiptensorflow | python | def _reverse_seq(input_seq, lengths):
'Reverse a list of Tensors up to specified lengths.\n\n Args:\n input_seq: Sequence of seq_len tensors of dimension (batch_size, n_features)\n or nested tuples of tensors.\n lengths: A `Tensor` of dimension batch_size, containing lengths for each\n sequence in the batch. If "None" is specified, simply reverses\n the list.\n\n Returns:\n time-reversed sequence\n '
if (lengths is None):
return list(reversed(input_seq))
flat_input_seq = tuple((nest.flatten(input_) for input_ in input_seq))
flat_results = [[] for _ in range(len(input_seq))]
for sequence in zip(*flat_input_seq):
input_shape = tensor_shape.unknown_shape(ndims=sequence[0].get_shape().ndims)
for input_ in sequence:
input_shape.merge_with(input_.get_shape())
input_.set_shape(input_shape)
s_joined = array_ops.stack(sequence)
if (lengths is not None):
lengths = math_ops.to_int64(lengths)
s_reversed = array_ops.reverse_sequence(s_joined, lengths, 0, 1)
result = array_ops.unstack(s_reversed)
for (r, flat_result) in zip(result, flat_results):
r.set_shape(input_shape)
flat_result.append(r)
results = [nest.pack_sequence_as(structure=input_, flat_sequence=flat_result) for (input_, flat_result) in zip(input_seq, flat_results)]
return results |
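`_reverse_seq` stacks the per-time-step tensors and hands the per-row reversal to `tf.reverse_sequence`, so each batch row is reversed only over its first `length` steps while any padding stays in place. The NumPy sketch below shows that per-row behaviour on a batch-major toy array; the helper name and data are assumptions for illustration, not part of the source.

```python
# Per-row reversal up to each sequence length, mirroring what
# tf.reverse_sequence does inside _reverse_seq (shown batch-major for clarity).
import numpy as np

def reverse_up_to_lengths(batch_major, lengths):
    out = batch_major.copy()
    for row, length in enumerate(lengths):
        out[row, :length] = batch_major[row, :length][::-1]
    return out

x = np.array([[1, 2, 3, 0],    # length 3; trailing 0 is padding
              [4, 5, 0, 0]])   # length 2
print(reverse_up_to_lengths(x, lengths=[3, 2]))
# [[3 2 1 0]
#  [5 4 0 0]]
```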
def bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs, sequence_length=None, initial_state_fw=None, initial_state_bw=None, dtype=None, parallel_iterations=None, swap_memory=False, time_major=False, scope=None):
'Creates a dynamic version of bidirectional recurrent neural network.\n\n Similar to the unidirectional case above (rnn) but takes input and builds\n independent forward and backward RNNs. The input_size of forward and\n backward cell must match. The initial state for both directions is zero by\n default (but can be set optionally) and no intermediate states are ever\n returned -- the network is fully unrolled for the given (passed in)\n length(s) of the sequence(s) or completely unrolled if length(s) is not\n given.\n\n Args:\n cell_fw: An instance of RNNCell, to be used for forward direction.\n cell_bw: An instance of RNNCell, to be used for backward direction.\n inputs: The RNN inputs.\n If time_major == False (default), this must be a tensor of shape:\n `[batch_size, max_time, input_size]`.\n If time_major == True, this must be a tensor of shape:\n `[max_time, batch_size, input_size]`.\n [batch_size, input_size].\n sequence_length: An int32/int64 vector, size `[batch_size]`,\n containing the actual lengths for each of the sequences.\n initial_state_fw: (optional) An initial state for the forward RNN.\n This must be a tensor of appropriate type and shape\n `[batch_size, cell_fw.state_size]`.\n If `cell_fw.state_size` is a tuple, this should be a tuple of\n tensors having shapes `[batch_size, s] for s in cell_fw.state_size`.\n initial_state_bw: (optional) Same as for `initial_state_fw`, but using\n the corresponding properties of `cell_bw`.\n dtype: (optional) The data type for the initial states and expected output.\n Required if initial_states are not provided or RNN states have a\n heterogeneous dtype.\n parallel_iterations: (Default: 32). The number of iterations to run in\n parallel. Those operations which do not have any temporal dependency\n and can be run in parallel, will be. This parameter trades off\n time for space. Values >> 1 use more memory but take less time,\n while smaller values use less memory but computations take longer.\n swap_memory: Transparently swap the tensors produced in forward inference\n but needed for back prop from GPU to CPU. This allows training RNNs\n which would typically not fit on a single GPU, with very minimal (or no)\n performance penalty.\n time_major: The shape format of the `inputs` and `outputs` Tensors.\n If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`.\n If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`.\n Using `time_major = True` is a bit more efficient because it avoids\n transposes at the beginning and end of the RNN calculation. However,\n most TensorFlow data is batch-major, so by default this function\n accepts input and emits output in batch-major form.\n dtype: (optional) The data type for the initial state. Required if\n either of the initial states are not provided.\n scope: VariableScope for the created subgraph; defaults to\n "bidirectional_rnn"\n\n Returns:\n A tuple (outputs, output_states) where:\n outputs: A tuple (output_fw, output_bw) containing the forward and\n the backward rnn output `Tensor`.\n If time_major == False (default),\n output_fw will be a `Tensor` shaped:\n `[batch_size, max_time, cell_fw.output_size]`\n and output_bw will be a `Tensor` shaped:\n `[batch_size, max_time, cell_bw.output_size]`.\n If time_major == True,\n output_fw will be a `Tensor` shaped:\n `[max_time, batch_size, cell_fw.output_size]`\n and output_bw will be a `Tensor` shaped:\n `[max_time, batch_size, cell_bw.output_size]`.\n It returns a tuple instead of a single concatenated `Tensor`, unlike\n in the `bidirectional_rnn`. If the concatenated one is preferred,\n the forward and backward outputs can be concatenated as\n `tf.concat(outputs, 2)`.\n output_states: A tuple (output_state_fw, output_state_bw) containing\n the forward and the backward final states of bidirectional rnn.\n\n Raises:\n TypeError: If `cell_fw` or `cell_bw` is not an instance of `RNNCell`.\n '
if (not isinstance(cell_fw, rnn_cell_impl._RNNCell)):
raise TypeError('cell_fw must be an instance of RNNCell')
if (not isinstance(cell_bw, rnn_cell_impl._RNNCell)):
raise TypeError('cell_bw must be an instance of RNNCell')
with vs.variable_scope((scope or 'bidirectional_rnn')):
with vs.variable_scope('fw') as fw_scope:
(output_fw, output_state_fw) = dynamic_rnn(cell=cell_fw, inputs=inputs, sequence_length=sequence_length, initial_state=initial_state_fw, dtype=dtype, parallel_iterations=parallel_iterations, swap_memory=swap_memory, time_major=time_major, scope=fw_scope)
if (not time_major):
time_dim = 1
batch_dim = 0
else:
time_dim = 0
batch_dim = 1
with vs.variable_scope('bw') as bw_scope:
inputs_reverse = array_ops.reverse_sequence(input=inputs, seq_lengths=sequence_length, seq_dim=time_dim, batch_dim=batch_dim)
(tmp, output_state_bw) = dynamic_rnn(cell=cell_bw, inputs=inputs_reverse, sequence_length=sequence_length, initial_state=initial_state_bw, dtype=dtype, parallel_iterations=parallel_iterations, swap_memory=swap_memory, time_major=time_major, scope=bw_scope)
output_bw = array_ops.reverse_sequence(input=tmp, seq_lengths=sequence_length, seq_dim=time_dim, batch_dim=batch_dim)
outputs = (output_fw, output_bw)
output_states = (output_state_fw, output_state_bw)
return (outputs, output_states) | -1,378,400,897,695,843,300 | Creates a dynamic version of bidirectional recurrent neural network.
Similar to the unidirectional case above (rnn) but takes input and builds
independent forward and backward RNNs. The input_size of forward and
backward cell must match. The initial state for both directions is zero by
default (but can be set optionally) and no intermediate states are ever
returned -- the network is fully unrolled for the given (passed in)
length(s) of the sequence(s) or completely unrolled if length(s) is not
given.
Args:
cell_fw: An instance of RNNCell, to be used for forward direction.
cell_bw: An instance of RNNCell, to be used for backward direction.
inputs: The RNN inputs.
If time_major == False (default), this must be a tensor of shape:
`[batch_size, max_time, input_size]`.
If time_major == True, this must be a tensor of shape:
`[max_time, batch_size, input_size]`.
sequence_length: An int32/int64 vector, size `[batch_size]`,
containing the actual lengths for each of the sequences.
initial_state_fw: (optional) An initial state for the forward RNN.
This must be a tensor of appropriate type and shape
`[batch_size, cell_fw.state_size]`.
If `cell_fw.state_size` is a tuple, this should be a tuple of
tensors having shapes `[batch_size, s] for s in cell_fw.state_size`.
initial_state_bw: (optional) Same as for `initial_state_fw`, but using
the corresponding properties of `cell_bw`.
dtype: (optional) The data type for the initial states and expected output.
Required if initial_states are not provided or RNN states have a
heterogeneous dtype.
parallel_iterations: (Default: 32). The number of iterations to run in
parallel. Those operations which do not have any temporal dependency
and can be run in parallel, will be. This parameter trades off
time for space. Values >> 1 use more memory but take less time,
while smaller values use less memory but computations take longer.
swap_memory: Transparently swap the tensors produced in forward inference
but needed for back prop from GPU to CPU. This allows training RNNs
which would typically not fit on a single GPU, with very minimal (or no)
performance penalty.
time_major: The shape format of the `inputs` and `outputs` Tensors.
If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`.
If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`.
Using `time_major = True` is a bit more efficient because it avoids
transposes at the beginning and end of the RNN calculation. However,
most TensorFlow data is batch-major, so by default this function
accepts input and emits output in batch-major form.
scope: VariableScope for the created subgraph; defaults to
"bidirectional_rnn"
Returns:
A tuple (outputs, output_states) where:
outputs: A tuple (output_fw, output_bw) containing the forward and
the backward rnn output `Tensor`.
If time_major == False (default),
output_fw will be a `Tensor` shaped:
`[batch_size, max_time, cell_fw.output_size]`
and output_bw will be a `Tensor` shaped:
`[batch_size, max_time, cell_bw.output_size]`.
If time_major == True,
output_fw will be a `Tensor` shaped:
`[max_time, batch_size, cell_fw.output_size]`
and output_bw will be a `Tensor` shaped:
`[max_time, batch_size, cell_bw.output_size]`.
It returns a tuple instead of a single concatenated `Tensor`, unlike
in the `bidirectional_rnn`. If the concatenated one is preferred,
the forward and backward outputs can be concatenated as
`tf.concat(outputs, 2)`.
output_states: A tuple (output_state_fw, output_state_bw) containing
the forward and the backward final states of bidirectional rnn.
Raises:
TypeError: If `cell_fw` or `cell_bw` is not an instance of `RNNCell`. | tensorflow/python/ops/rnn.py | bidirectional_dynamic_rnn | gameover27/hiptensorflow | python | def bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs, sequence_length=None, initial_state_fw=None, initial_state_bw=None, dtype=None, parallel_iterations=None, swap_memory=False, time_major=False, scope=None):
'Creates a dynamic version of bidirectional recurrent neural network.\n\n Similar to the unidirectional case above (rnn) but takes input and builds\n independent forward and backward RNNs. The input_size of forward and\n backward cell must match. The initial state for both directions is zero by\n default (but can be set optionally) and no intermediate states are ever\n returned -- the network is fully unrolled for the given (passed in)\n length(s) of the sequence(s) or completely unrolled if length(s) is not\n given.\n\n Args:\n cell_fw: An instance of RNNCell, to be used for forward direction.\n cell_bw: An instance of RNNCell, to be used for backward direction.\n inputs: The RNN inputs.\n If time_major == False (default), this must be a tensor of shape:\n `[batch_size, max_time, input_size]`.\n If time_major == True, this must be a tensor of shape:\n `[max_time, batch_size, input_size]`.\n [batch_size, input_size].\n sequence_length: An int32/int64 vector, size `[batch_size]`,\n containing the actual lengths for each of the sequences.\n initial_state_fw: (optional) An initial state for the forward RNN.\n This must be a tensor of appropriate type and shape\n `[batch_size, cell_fw.state_size]`.\n If `cell_fw.state_size` is a tuple, this should be a tuple of\n tensors having shapes `[batch_size, s] for s in cell_fw.state_size`.\n initial_state_bw: (optional) Same as for `initial_state_fw`, but using\n the corresponding properties of `cell_bw`.\n dtype: (optional) The data type for the initial states and expected output.\n Required if initial_states are not provided or RNN states have a\n heterogeneous dtype.\n parallel_iterations: (Default: 32). The number of iterations to run in\n parallel. Those operations which do not have any temporal dependency\n and can be run in parallel, will be. This parameter trades off\n time for space. Values >> 1 use more memory but take less time,\n while smaller values use less memory but computations take longer.\n swap_memory: Transparently swap the tensors produced in forward inference\n but needed for back prop from GPU to CPU. This allows training RNNs\n which would typically not fit on a single GPU, with very minimal (or no)\n performance penalty.\n time_major: The shape format of the `inputs` and `outputs` Tensors.\n If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`.\n If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`.\n Using `time_major = True` is a bit more efficient because it avoids\n transposes at the beginning and end of the RNN calculation. However,\n most TensorFlow data is batch-major, so by default this function\n accepts input and emits output in batch-major form.\n dtype: (optional) The data type for the initial state. Required if\n either of the initial states are not provided.\n scope: VariableScope for the created subgraph; defaults to\n "bidirectional_rnn"\n\n Returns:\n A tuple (outputs, output_states) where:\n outputs: A tuple (output_fw, output_bw) containing the forward and\n the backward rnn output `Tensor`.\n If time_major == False (default),\n output_fw will be a `Tensor` shaped:\n `[batch_size, max_time, cell_fw.output_size]`\n and output_bw will be a `Tensor` shaped:\n `[batch_size, max_time, cell_bw.output_size]`.\n If time_major == True,\n output_fw will be a `Tensor` shaped:\n `[max_time, batch_size, cell_fw.output_size]`\n and output_bw will be a `Tensor` shaped:\n `[max_time, batch_size, cell_bw.output_size]`.\n It returns a tuple instead of a single concatenated `Tensor`, unlike\n in the `bidirectional_rnn`. If the concatenated one is preferred,\n the forward and backward outputs can be concatenated as\n `tf.concat(outputs, 2)`.\n output_states: A tuple (output_state_fw, output_state_bw) containing\n the forward and the backward final states of bidirectional rnn.\n\n Raises:\n TypeError: If `cell_fw` or `cell_bw` is not an instance of `RNNCell`.\n '
if (not isinstance(cell_fw, rnn_cell_impl._RNNCell)):
raise TypeError('cell_fw must be an instance of RNNCell')
if (not isinstance(cell_bw, rnn_cell_impl._RNNCell)):
raise TypeError('cell_bw must be an instance of RNNCell')
with vs.variable_scope((scope or 'bidirectional_rnn')):
with vs.variable_scope('fw') as fw_scope:
(output_fw, output_state_fw) = dynamic_rnn(cell=cell_fw, inputs=inputs, sequence_length=sequence_length, initial_state=initial_state_fw, dtype=dtype, parallel_iterations=parallel_iterations, swap_memory=swap_memory, time_major=time_major, scope=fw_scope)
if (not time_major):
time_dim = 1
batch_dim = 0
else:
time_dim = 0
batch_dim = 1
with vs.variable_scope('bw') as bw_scope:
inputs_reverse = array_ops.reverse_sequence(input=inputs, seq_lengths=sequence_length, seq_dim=time_dim, batch_dim=batch_dim)
(tmp, output_state_bw) = dynamic_rnn(cell=cell_bw, inputs=inputs_reverse, sequence_length=sequence_length, initial_state=initial_state_bw, dtype=dtype, parallel_iterations=parallel_iterations, swap_memory=swap_memory, time_major=time_major, scope=bw_scope)
output_bw = array_ops.reverse_sequence(input=tmp, seq_lengths=sequence_length, seq_dim=time_dim, batch_dim=batch_dim)
outputs = (output_fw, output_bw)
output_states = (output_state_fw, output_state_bw)
return (outputs, output_states) |
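A hedged usage sketch for `bidirectional_dynamic_rnn`, assuming the TF 1.x-era endpoints `tf.nn.bidirectional_dynamic_rnn` and `tf.contrib.rnn.LSTMCell` (the cell class this file's own docstrings reference); exact module paths differ between 1.x releases, and the placeholder shapes are arbitrary.

```python
import tensorflow as tf

batch_size, max_time, input_depth, num_units = 32, 50, 16, 64

inputs = tf.placeholder(tf.float32, [batch_size, max_time, input_depth])
seq_len = tf.placeholder(tf.int32, [batch_size])

cell_fw = tf.contrib.rnn.LSTMCell(num_units)
cell_bw = tf.contrib.rnn.LSTMCell(num_units)

# outputs is a (output_fw, output_bw) pair, each [batch_size, max_time, num_units]
# in the default batch-major layout; states holds the two final cell states.
outputs, states = tf.nn.bidirectional_dynamic_rnn(
    cell_fw, cell_bw, inputs, sequence_length=seq_len, dtype=tf.float32)

# Concatenate along the feature axis if a single tensor is wanted, as the
# docstring suggests: shape [batch_size, max_time, 2 * num_units].
combined = tf.concat(outputs, 2)
```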
def dynamic_rnn(cell, inputs, sequence_length=None, initial_state=None, dtype=None, parallel_iterations=None, swap_memory=False, time_major=False, scope=None):
'Creates a recurrent neural network specified by RNNCell `cell`.\n\n This function is functionally identical to the function `rnn` above, but\n performs fully dynamic unrolling of `inputs`.\n\n Unlike `rnn`, the input `inputs` is not a Python list of `Tensors`, one for\n each frame. Instead, `inputs` may be a single `Tensor` where\n the maximum time is either the first or second dimension (see the parameter\n `time_major`). Alternatively, it may be a (possibly nested) tuple of\n Tensors, each of them having matching batch and time dimensions.\n The corresponding output is either a single `Tensor` having the same number\n of time steps and batch size, or a (possibly nested) tuple of such tensors,\n matching the nested structure of `cell.output_size`.\n\n The parameter `sequence_length` is optional and is used to copy-through state\n and zero-out outputs when past a batch element\'s sequence length. So it\'s more\n for correctness than performance, unlike in rnn().\n\n Args:\n cell: An instance of RNNCell.\n inputs: The RNN inputs.\n\n If `time_major == False` (default), this must be a `Tensor` of shape:\n `[batch_size, max_time, ...]`, or a nested tuple of such\n elements.\n\n If `time_major == True`, this must be a `Tensor` of shape:\n `[max_time, batch_size, ...]`, or a nested tuple of such\n elements.\n\n This may also be a (possibly nested) tuple of Tensors satisfying\n this property. The first two dimensions must match across all the inputs,\n but otherwise the ranks and other shape components may differ.\n In this case, input to `cell` at each time-step will replicate the\n structure of these tuples, except for the time dimension (from which the\n time is taken).\n\n The input to `cell` at each time step will be a `Tensor` or (possibly\n nested) tuple of Tensors each with dimensions `[batch_size, ...]`.\n\n sequence_length: (optional) An int32/int64 vector sized `[batch_size]`.\n initial_state: (optional) An initial state for the RNN.\n If `cell.state_size` is an integer, this must be\n a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`.\n If `cell.state_size` is a tuple, this should be a tuple of\n tensors having shapes `[batch_size, s] for s in cell.state_size`.\n dtype: (optional) The data type for the initial state and expected output.\n Required if initial_state is not provided or RNN state has a heterogeneous\n dtype.\n parallel_iterations: (Default: 32). The number of iterations to run in\n parallel. Those operations which do not have any temporal dependency\n and can be run in parallel, will be. This parameter trades off\n time for space. Values >> 1 use more memory but take less time,\n while smaller values use less memory but computations take longer.\n swap_memory: Transparently swap the tensors produced in forward inference\n but needed for back prop from GPU to CPU. This allows training RNNs\n which would typically not fit on a single GPU, with very minimal (or no)\n performance penalty.\n time_major: The shape format of the `inputs` and `outputs` Tensors.\n If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`.\n If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`.\n Using `time_major = True` is a bit more efficient because it avoids\n transposes at the beginning and end of the RNN calculation. However,\n most TensorFlow data is batch-major, so by default this function\n accepts input and emits output in batch-major form.\n scope: VariableScope for the created subgraph; defaults to "rnn".\n\n Returns:\n A pair (outputs, state) where:\n\n outputs: The RNN output `Tensor`.\n\n If time_major == False (default), this will be a `Tensor` shaped:\n `[batch_size, max_time, cell.output_size]`.\n\n If time_major == True, this will be a `Tensor` shaped:\n `[max_time, batch_size, cell.output_size]`.\n\n Note, if `cell.output_size` is a (possibly nested) tuple of integers\n or `TensorShape` objects, then `outputs` will be a tuple having the\n same structure as `cell.output_size`, containing Tensors having shapes\n corresponding to the shape data in `cell.output_size`.\n\n state: The final state. If `cell.state_size` is an int, this\n will be shaped `[batch_size, cell.state_size]`. If it is a\n `TensorShape`, this will be shaped `[batch_size] + cell.state_size`.\n If it is a (possibly nested) tuple of ints or `TensorShape`, this will\n be a tuple having the corresponding shapes.\n\n Raises:\n TypeError: If `cell` is not an instance of RNNCell.\n ValueError: If inputs is None or an empty list.\n '
if (not isinstance(cell, rnn_cell_impl._RNNCell)):
raise TypeError('cell must be an instance of RNNCell')
flat_input = nest.flatten(inputs)
if (not time_major):
flat_input = tuple((array_ops.transpose(input_, [1, 0, 2]) for input_ in flat_input))
parallel_iterations = (parallel_iterations or 32)
if (sequence_length is not None):
sequence_length = math_ops.to_int32(sequence_length)
if (sequence_length.get_shape().ndims not in (None, 1)):
raise ValueError(('sequence_length must be a vector of length batch_size, but saw shape: %s' % sequence_length.get_shape()))
sequence_length = array_ops.identity(sequence_length, name='sequence_length')
with vs.variable_scope((scope or 'rnn')) as varscope:
if (varscope.caching_device is None):
varscope.set_caching_device((lambda op: op.device))
input_shape = tuple((array_ops.shape(input_) for input_ in flat_input))
batch_size = input_shape[0][1]
for input_ in input_shape:
if (input_[1].get_shape() != batch_size.get_shape()):
raise ValueError('All inputs should have the same batch size')
if (initial_state is not None):
state = initial_state
else:
if (not dtype):
raise ValueError('If no initial_state is provided, dtype must be.')
state = cell.zero_state(batch_size, dtype)
def _assert_has_shape(x, shape):
x_shape = array_ops.shape(x)
packed_shape = array_ops.stack(shape)
return control_flow_ops.Assert(math_ops.reduce_all(math_ops.equal(x_shape, packed_shape)), [('Expected shape for Tensor %s is ' % x.name), packed_shape, ' but saw shape: ', x_shape])
if (sequence_length is not None):
with ops.control_dependencies([_assert_has_shape(sequence_length, [batch_size])]):
sequence_length = array_ops.identity(sequence_length, name='CheckSeqLen')
inputs = nest.pack_sequence_as(structure=inputs, flat_sequence=flat_input)
(outputs, final_state) = _dynamic_rnn_loop(cell, inputs, state, parallel_iterations=parallel_iterations, swap_memory=swap_memory, sequence_length=sequence_length, dtype=dtype)
if (not time_major):
flat_output = nest.flatten(outputs)
flat_output = [array_ops.transpose(output, [1, 0, 2]) for output in flat_output]
outputs = nest.pack_sequence_as(structure=outputs, flat_sequence=flat_output)
return (outputs, final_state) | 1,271,462,727,045,380,000 | Creates a recurrent neural network specified by RNNCell `cell`.
This function is functionally identical to the function `rnn` above, but
performs fully dynamic unrolling of `inputs`.
Unlike `rnn`, the input `inputs` is not a Python list of `Tensors`, one for
each frame. Instead, `inputs` may be a single `Tensor` where
the maximum time is either the first or second dimension (see the parameter
`time_major`). Alternatively, it may be a (possibly nested) tuple of
Tensors, each of them having matching batch and time dimensions.
The corresponding output is either a single `Tensor` having the same number
of time steps and batch size, or a (possibly nested) tuple of such tensors,
matching the nested structure of `cell.output_size`.
The parameter `sequence_length` is optional and is used to copy-through state
and zero-out outputs when past a batch element's sequence length. So it's more
for correctness than performance, unlike in rnn().
Args:
cell: An instance of RNNCell.
inputs: The RNN inputs.
If `time_major == False` (default), this must be a `Tensor` of shape:
`[batch_size, max_time, ...]`, or a nested tuple of such
elements.
If `time_major == True`, this must be a `Tensor` of shape:
`[max_time, batch_size, ...]`, or a nested tuple of such
elements.
This may also be a (possibly nested) tuple of Tensors satisfying
this property. The first two dimensions must match across all the inputs,
but otherwise the ranks and other shape components may differ.
In this case, input to `cell` at each time-step will replicate the
structure of these tuples, except for the time dimension (from which the
time is taken).
The input to `cell` at each time step will be a `Tensor` or (possibly
nested) tuple of Tensors each with dimensions `[batch_size, ...]`.
sequence_length: (optional) An int32/int64 vector sized `[batch_size]`.
initial_state: (optional) An initial state for the RNN.
If `cell.state_size` is an integer, this must be
a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`.
If `cell.state_size` is a tuple, this should be a tuple of
tensors having shapes `[batch_size, s] for s in cell.state_size`.
dtype: (optional) The data type for the initial state and expected output.
Required if initial_state is not provided or RNN state has a heterogeneous
dtype.
parallel_iterations: (Default: 32). The number of iterations to run in
parallel. Those operations which do not have any temporal dependency
and can be run in parallel, will be. This parameter trades off
time for space. Values >> 1 use more memory but take less time,
while smaller values use less memory but computations take longer.
swap_memory: Transparently swap the tensors produced in forward inference
but needed for back prop from GPU to CPU. This allows training RNNs
which would typically not fit on a single GPU, with very minimal (or no)
performance penalty.
time_major: The shape format of the `inputs` and `outputs` Tensors.
If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`.
If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`.
Using `time_major = True` is a bit more efficient because it avoids
transposes at the beginning and end of the RNN calculation. However,
most TensorFlow data is batch-major, so by default this function
accepts input and emits output in batch-major form.
scope: VariableScope for the created subgraph; defaults to "rnn".
Returns:
A pair (outputs, state) where:
outputs: The RNN output `Tensor`.
If time_major == False (default), this will be a `Tensor` shaped:
`[batch_size, max_time, cell.output_size]`.
If time_major == True, this will be a `Tensor` shaped:
`[max_time, batch_size, cell.output_size]`.
Note, if `cell.output_size` is a (possibly nested) tuple of integers
or `TensorShape` objects, then `outputs` will be a tuple having the
same structure as `cell.output_size`, containing Tensors having shapes
corresponding to the shape data in `cell.output_size`.
state: The final state. If `cell.state_size` is an int, this
will be shaped `[batch_size, cell.state_size]`. If it is a
`TensorShape`, this will be shaped `[batch_size] + cell.state_size`.
If it is a (possibly nested) tuple of ints or `TensorShape`, this will
be a tuple having the corresponding shapes.
Raises:
TypeError: If `cell` is not an instance of RNNCell.
ValueError: If inputs is None or an empty list. | tensorflow/python/ops/rnn.py | dynamic_rnn | gameover27/hiptensorflow | python | def dynamic_rnn(cell, inputs, sequence_length=None, initial_state=None, dtype=None, parallel_iterations=None, swap_memory=False, time_major=False, scope=None):
'Creates a recurrent neural network specified by RNNCell `cell`.\n\n This function is functionally identical to the function `rnn` above, but\n performs fully dynamic unrolling of `inputs`.\n\n Unlike `rnn`, the input `inputs` is not a Python list of `Tensors`, one for\n each frame. Instead, `inputs` may be a single `Tensor` where\n the maximum time is either the first or second dimension (see the parameter\n `time_major`). Alternatively, it may be a (possibly nested) tuple of\n Tensors, each of them having matching batch and time dimensions.\n The corresponding output is either a single `Tensor` having the same number\n of time steps and batch size, or a (possibly nested) tuple of such tensors,\n matching the nested structure of `cell.output_size`.\n\n The parameter `sequence_length` is optional and is used to copy-through state\n and zero-out outputs when past a batch element\'s sequence length. So it\'s more\n for correctness than performance, unlike in rnn().\n\n Args:\n cell: An instance of RNNCell.\n inputs: The RNN inputs.\n\n If `time_major == False` (default), this must be a `Tensor` of shape:\n `[batch_size, max_time, ...]`, or a nested tuple of such\n elements.\n\n If `time_major == True`, this must be a `Tensor` of shape:\n `[max_time, batch_size, ...]`, or a nested tuple of such\n elements.\n\n This may also be a (possibly nested) tuple of Tensors satisfying\n this property. The first two dimensions must match across all the inputs,\n but otherwise the ranks and other shape components may differ.\n In this case, input to `cell` at each time-step will replicate the\n structure of these tuples, except for the time dimension (from which the\n time is taken).\n\n The input to `cell` at each time step will be a `Tensor` or (possibly\n nested) tuple of Tensors each with dimensions `[batch_size, ...]`.\n\n sequence_length: (optional) An int32/int64 vector sized `[batch_size]`.\n initial_state: (optional) An initial state for the RNN.\n If `cell.state_size` is an integer, this must be\n a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`.\n If `cell.state_size` is a tuple, this should be a tuple of\n tensors having shapes `[batch_size, s] for s in cell.state_size`.\n dtype: (optional) The data type for the initial state and expected output.\n Required if initial_state is not provided or RNN state has a heterogeneous\n dtype.\n parallel_iterations: (Default: 32). The number of iterations to run in\n parallel. Those operations which do not have any temporal dependency\n and can be run in parallel, will be. This parameter trades off\n time for space. Values >> 1 use more memory but take less time,\n while smaller values use less memory but computations take longer.\n swap_memory: Transparently swap the tensors produced in forward inference\n but needed for back prop from GPU to CPU. This allows training RNNs\n which would typically not fit on a single GPU, with very minimal (or no)\n performance penalty.\n time_major: The shape format of the `inputs` and `outputs` Tensors.\n If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`.\n If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`.\n Using `time_major = True` is a bit more efficient because it avoids\n transposes at the beginning and end of the RNN calculation. However,\n most TensorFlow data is batch-major, so by default this function\n accepts input and emits output in batch-major form.\n scope: VariableScope for the created subgraph; defaults to "rnn".\n\n Returns:\n A pair (outputs, state) where:\n\n outputs: The RNN output `Tensor`.\n\n If time_major == False (default), this will be a `Tensor` shaped:\n `[batch_size, max_time, cell.output_size]`.\n\n If time_major == True, this will be a `Tensor` shaped:\n `[max_time, batch_size, cell.output_size]`.\n\n Note, if `cell.output_size` is a (possibly nested) tuple of integers\n or `TensorShape` objects, then `outputs` will be a tuple having the\n same structure as `cell.output_size`, containing Tensors having shapes\n corresponding to the shape data in `cell.output_size`.\n\n state: The final state. If `cell.state_size` is an int, this\n will be shaped `[batch_size, cell.state_size]`. If it is a\n `TensorShape`, this will be shaped `[batch_size] + cell.state_size`.\n If it is a (possibly nested) tuple of ints or `TensorShape`, this will\n be a tuple having the corresponding shapes.\n\n Raises:\n TypeError: If `cell` is not an instance of RNNCell.\n ValueError: If inputs is None or an empty list.\n '
if (not isinstance(cell, rnn_cell_impl._RNNCell)):
raise TypeError('cell must be an instance of RNNCell')
flat_input = nest.flatten(inputs)
if (not time_major):
flat_input = tuple((array_ops.transpose(input_, [1, 0, 2]) for input_ in flat_input))
parallel_iterations = (parallel_iterations or 32)
if (sequence_length is not None):
sequence_length = math_ops.to_int32(sequence_length)
if (sequence_length.get_shape().ndims not in (None, 1)):
raise ValueError(('sequence_length must be a vector of length batch_size, but saw shape: %s' % sequence_length.get_shape()))
sequence_length = array_ops.identity(sequence_length, name='sequence_length')
with vs.variable_scope((scope or 'rnn')) as varscope:
if (varscope.caching_device is None):
varscope.set_caching_device((lambda op: op.device))
input_shape = tuple((array_ops.shape(input_) for input_ in flat_input))
batch_size = input_shape[0][1]
for input_ in input_shape:
if (input_[1].get_shape() != batch_size.get_shape()):
raise ValueError('All inputs should have the same batch size')
if (initial_state is not None):
state = initial_state
else:
if (not dtype):
raise ValueError('If no initial_state is provided, dtype must be.')
state = cell.zero_state(batch_size, dtype)
def _assert_has_shape(x, shape):
x_shape = array_ops.shape(x)
packed_shape = array_ops.stack(shape)
return control_flow_ops.Assert(math_ops.reduce_all(math_ops.equal(x_shape, packed_shape)), [('Expected shape for Tensor %s is ' % x.name), packed_shape, ' but saw shape: ', x_shape])
if (sequence_length is not None):
with ops.control_dependencies([_assert_has_shape(sequence_length, [batch_size])]):
sequence_length = array_ops.identity(sequence_length, name='CheckSeqLen')
inputs = nest.pack_sequence_as(structure=inputs, flat_sequence=flat_input)
(outputs, final_state) = _dynamic_rnn_loop(cell, inputs, state, parallel_iterations=parallel_iterations, swap_memory=swap_memory, sequence_length=sequence_length, dtype=dtype)
if (not time_major):
flat_output = nest.flatten(outputs)
flat_output = [array_ops.transpose(output, [1, 0, 2]) for output in flat_output]
outputs = nest.pack_sequence_as(structure=outputs, flat_sequence=flat_output)
return (outputs, final_state) |
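A hedged usage sketch for `dynamic_rnn` in the same TF 1.x style; the cell constructor and placeholder shapes are illustrative assumptions. `dtype` is passed because no `initial_state` is supplied, matching the requirement stated in the docstring.

```python
import tensorflow as tf

batch_size, max_time, input_depth, num_units = 32, 50, 16, 64

inputs = tf.placeholder(tf.float32, [batch_size, max_time, input_depth])
seq_len = tf.placeholder(tf.int32, [batch_size])

cell = tf.contrib.rnn.LSTMCell(num_units)

# outputs: [batch_size, max_time, num_units] (batch-major by default);
# state: the final cell state (an LSTMStateTuple for an LSTM cell).
outputs, state = tf.nn.dynamic_rnn(
    cell, inputs, sequence_length=seq_len, dtype=tf.float32)
```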
def _dynamic_rnn_loop(cell, inputs, initial_state, parallel_iterations, swap_memory, sequence_length=None, dtype=None):
'Internal implementation of Dynamic RNN.\n\n Args:\n cell: An instance of RNNCell.\n inputs: A `Tensor` of shape [time, batch_size, input_size], or a nested\n tuple of such elements.\n initial_state: A `Tensor` of shape `[batch_size, state_size]`, or if\n `cell.state_size` is a tuple, then this should be a tuple of\n tensors having shapes `[batch_size, s] for s in cell.state_size`.\n parallel_iterations: Positive Python int.\n swap_memory: A Python boolean\n sequence_length: (optional) An `int32` `Tensor` of shape [batch_size].\n dtype: (optional) Expected dtype of output. If not specified, inferred from\n initial_state.\n\n Returns:\n Tuple `(final_outputs, final_state)`.\n final_outputs:\n A `Tensor` of shape `[time, batch_size, cell.output_size]`. If\n `cell.output_size` is a (possibly nested) tuple of ints or `TensorShape`\n objects, then this returns a (possibly nsted) tuple of Tensors matching\n the corresponding shapes.\n final_state:\n A `Tensor`, or possibly nested tuple of Tensors, matching in length\n and shapes to `initial_state`.\n\n Raises:\n ValueError: If the input depth cannot be inferred via shape inference\n from the inputs.\n '
state = initial_state
assert isinstance(parallel_iterations, int), 'parallel_iterations must be int'
state_size = cell.state_size
flat_input = nest.flatten(inputs)
flat_output_size = nest.flatten(cell.output_size)
input_shape = array_ops.shape(flat_input[0])
time_steps = input_shape[0]
batch_size = input_shape[1]
inputs_got_shape = tuple((input_.get_shape().with_rank_at_least(3) for input_ in flat_input))
(const_time_steps, const_batch_size) = inputs_got_shape[0].as_list()[:2]
for shape in inputs_got_shape:
if (not shape[2:].is_fully_defined()):
raise ValueError('Input size (depth of inputs) must be accessible via shape inference, but saw value None.')
got_time_steps = shape[0].value
got_batch_size = shape[1].value
if (const_time_steps != got_time_steps):
raise ValueError('Time steps is not the same for all the elements in the input in a batch.')
if (const_batch_size != got_batch_size):
raise ValueError('Batch_size is not the same for all the elements in the input.')
def _create_zero_arrays(size):
size = _state_size_with_prefix(size, prefix=[batch_size])
return array_ops.zeros(array_ops.stack(size), _infer_state_dtype(dtype, state))
flat_zero_output = tuple((_create_zero_arrays(output) for output in flat_output_size))
zero_output = nest.pack_sequence_as(structure=cell.output_size, flat_sequence=flat_zero_output)
if (sequence_length is not None):
min_sequence_length = math_ops.reduce_min(sequence_length)
max_sequence_length = math_ops.reduce_max(sequence_length)
time = array_ops.constant(0, dtype=dtypes.int32, name='time')
with ops.name_scope('dynamic_rnn') as scope:
base_name = scope
def _create_ta(name, dtype):
return tensor_array_ops.TensorArray(dtype=dtype, size=time_steps, tensor_array_name=(base_name + name))
output_ta = tuple((_create_ta(('output_%d' % i), _infer_state_dtype(dtype, state)) for i in range(len(flat_output_size))))
input_ta = tuple((_create_ta(('input_%d' % i), flat_input[0].dtype) for i in range(len(flat_input))))
input_ta = tuple((ta.unstack(input_) for (ta, input_) in zip(input_ta, flat_input)))
def _time_step(time, output_ta_t, state):
'Take a time step of the dynamic RNN.\n\n Args:\n time: int32 scalar Tensor.\n output_ta_t: List of `TensorArray`s that represent the output.\n state: nested tuple of vector tensors that represent the state.\n\n Returns:\n The tuple (time + 1, output_ta_t with updated flow, new_state).\n '
input_t = tuple((ta.read(time) for ta in input_ta))
for (input_, shape) in zip(input_t, inputs_got_shape):
input_.set_shape(shape[1:])
input_t = nest.pack_sequence_as(structure=inputs, flat_sequence=input_t)
call_cell = (lambda : cell(input_t, state))
if (sequence_length is not None):
(output, new_state) = _rnn_step(time=time, sequence_length=sequence_length, min_sequence_length=min_sequence_length, max_sequence_length=max_sequence_length, zero_output=zero_output, state=state, call_cell=call_cell, state_size=state_size, skip_conditionals=True)
else:
(output, new_state) = call_cell()
output = nest.flatten(output)
output_ta_t = tuple((ta.write(time, out) for (ta, out) in zip(output_ta_t, output)))
return ((time + 1), output_ta_t, new_state)
(_, output_final_ta, final_state) = control_flow_ops.while_loop(cond=(lambda time, *_: (time < time_steps)), body=_time_step, loop_vars=(time, output_ta, state), parallel_iterations=parallel_iterations, swap_memory=swap_memory)
final_outputs = tuple((ta.stack() for ta in output_final_ta))
for (output, output_size) in zip(final_outputs, flat_output_size):
shape = _state_size_with_prefix(output_size, prefix=[const_time_steps, const_batch_size])
output.set_shape(shape)
final_outputs = nest.pack_sequence_as(structure=cell.output_size, flat_sequence=final_outputs)
return (final_outputs, final_state) | -1,497,298,739,967,970,000 | Internal implementation of Dynamic RNN.
Args:
cell: An instance of RNNCell.
inputs: A `Tensor` of shape [time, batch_size, input_size], or a nested
tuple of such elements.
initial_state: A `Tensor` of shape `[batch_size, state_size]`, or if
`cell.state_size` is a tuple, then this should be a tuple of
tensors having shapes `[batch_size, s] for s in cell.state_size`.
parallel_iterations: Positive Python int.
swap_memory: A Python boolean
sequence_length: (optional) An `int32` `Tensor` of shape [batch_size].
dtype: (optional) Expected dtype of output. If not specified, inferred from
initial_state.
Returns:
Tuple `(final_outputs, final_state)`.
final_outputs:
A `Tensor` of shape `[time, batch_size, cell.output_size]`. If
`cell.output_size` is a (possibly nested) tuple of ints or `TensorShape`
objects, then this returns a (possibly nested) tuple of Tensors matching
the corresponding shapes.
final_state:
A `Tensor`, or possibly nested tuple of Tensors, matching in length
and shapes to `initial_state`.
Raises:
ValueError: If the input depth cannot be inferred via shape inference
from the inputs. | tensorflow/python/ops/rnn.py | _dynamic_rnn_loop | gameover27/hiptensorflow | python | def _dynamic_rnn_loop(cell, inputs, initial_state, parallel_iterations, swap_memory, sequence_length=None, dtype=None):
'Internal implementation of Dynamic RNN.\n\n Args:\n cell: An instance of RNNCell.\n inputs: A `Tensor` of shape [time, batch_size, input_size], or a nested\n tuple of such elements.\n initial_state: A `Tensor` of shape `[batch_size, state_size]`, or if\n `cell.state_size` is a tuple, then this should be a tuple of\n tensors having shapes `[batch_size, s] for s in cell.state_size`.\n parallel_iterations: Positive Python int.\n swap_memory: A Python boolean\n sequence_length: (optional) An `int32` `Tensor` of shape [batch_size].\n dtype: (optional) Expected dtype of output. If not specified, inferred from\n initial_state.\n\n Returns:\n Tuple `(final_outputs, final_state)`.\n final_outputs:\n A `Tensor` of shape `[time, batch_size, cell.output_size]`. If\n `cell.output_size` is a (possibly nested) tuple of ints or `TensorShape`\n objects, then this returns a (possibly nsted) tuple of Tensors matching\n the corresponding shapes.\n final_state:\n A `Tensor`, or possibly nested tuple of Tensors, matching in length\n and shapes to `initial_state`.\n\n Raises:\n ValueError: If the input depth cannot be inferred via shape inference\n from the inputs.\n '
state = initial_state
assert isinstance(parallel_iterations, int), 'parallel_iterations must be int'
state_size = cell.state_size
flat_input = nest.flatten(inputs)
flat_output_size = nest.flatten(cell.output_size)
input_shape = array_ops.shape(flat_input[0])
time_steps = input_shape[0]
batch_size = input_shape[1]
inputs_got_shape = tuple((input_.get_shape().with_rank_at_least(3) for input_ in flat_input))
(const_time_steps, const_batch_size) = inputs_got_shape[0].as_list()[:2]
for shape in inputs_got_shape:
if (not shape[2:].is_fully_defined()):
raise ValueError('Input size (depth of inputs) must be accessible via shape inference, but saw value None.')
got_time_steps = shape[0].value
got_batch_size = shape[1].value
if (const_time_steps != got_time_steps):
raise ValueError('Time steps is not the same for all the elements in the input in a batch.')
if (const_batch_size != got_batch_size):
raise ValueError('Batch_size is not the same for all the elements in the input.')
def _create_zero_arrays(size):
size = _state_size_with_prefix(size, prefix=[batch_size])
return array_ops.zeros(array_ops.stack(size), _infer_state_dtype(dtype, state))
flat_zero_output = tuple((_create_zero_arrays(output) for output in flat_output_size))
zero_output = nest.pack_sequence_as(structure=cell.output_size, flat_sequence=flat_zero_output)
if (sequence_length is not None):
min_sequence_length = math_ops.reduce_min(sequence_length)
max_sequence_length = math_ops.reduce_max(sequence_length)
time = array_ops.constant(0, dtype=dtypes.int32, name='time')
with ops.name_scope('dynamic_rnn') as scope:
base_name = scope
def _create_ta(name, dtype):
return tensor_array_ops.TensorArray(dtype=dtype, size=time_steps, tensor_array_name=(base_name + name))
output_ta = tuple((_create_ta(('output_%d' % i), _infer_state_dtype(dtype, state)) for i in range(len(flat_output_size))))
input_ta = tuple((_create_ta(('input_%d' % i), flat_input[0].dtype) for i in range(len(flat_input))))
input_ta = tuple((ta.unstack(input_) for (ta, input_) in zip(input_ta, flat_input)))
def _time_step(time, output_ta_t, state):
'Take a time step of the dynamic RNN.\n\n Args:\n time: int32 scalar Tensor.\n output_ta_t: List of `TensorArray`s that represent the output.\n state: nested tuple of vector tensors that represent the state.\n\n Returns:\n The tuple (time + 1, output_ta_t with updated flow, new_state).\n '
input_t = tuple((ta.read(time) for ta in input_ta))
for (input_, shape) in zip(input_t, inputs_got_shape):
input_.set_shape(shape[1:])
input_t = nest.pack_sequence_as(structure=inputs, flat_sequence=input_t)
call_cell = (lambda : cell(input_t, state))
if (sequence_length is not None):
(output, new_state) = _rnn_step(time=time, sequence_length=sequence_length, min_sequence_length=min_sequence_length, max_sequence_length=max_sequence_length, zero_output=zero_output, state=state, call_cell=call_cell, state_size=state_size, skip_conditionals=True)
else:
(output, new_state) = call_cell()
output = nest.flatten(output)
output_ta_t = tuple((ta.write(time, out) for (ta, out) in zip(output_ta_t, output)))
return ((time + 1), output_ta_t, new_state)
(_, output_final_ta, final_state) = control_flow_ops.while_loop(cond=(lambda time, *_: (time < time_steps)), body=_time_step, loop_vars=(time, output_ta, state), parallel_iterations=parallel_iterations, swap_memory=swap_memory)
final_outputs = tuple((ta.stack() for ta in output_final_ta))
for (output, output_size) in zip(final_outputs, flat_output_size):
shape = _state_size_with_prefix(output_size, prefix=[const_time_steps, const_batch_size])
output.set_shape(shape)
final_outputs = nest.pack_sequence_as(structure=cell.output_size, flat_sequence=final_outputs)
return (final_outputs, final_state) |
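`_dynamic_rnn_loop` drives the cell with `tf.while_loop` over `TensorArray`s: the time-major input is unstacked into one `TensorArray`, each iteration reads one step and writes one output, and the outputs are stacked afterwards. The sketch below reduces that pattern to a toy computation in which doubling the input stands in for `cell(input_t, state)`; it illustrates the looping mechanism only and is not the function's actual body.

```python
import tensorflow as tf

time_steps, batch_size, depth = 5, 3, 4
inputs = tf.random_normal([time_steps, batch_size, depth])   # time-major

input_ta = tf.TensorArray(dtype=tf.float32, size=time_steps).unstack(inputs)
output_ta = tf.TensorArray(dtype=tf.float32, size=time_steps)

def body(time, out_ta):
    x_t = input_ta.read(time)        # [batch_size, depth] for this step
    y_t = x_t * 2.0                  # stand-in for the RNN cell call
    return time + 1, out_ta.write(time, y_t)

_, output_ta = tf.while_loop(
    cond=lambda time, _: time < time_steps,
    body=body,
    loop_vars=(tf.constant(0), output_ta))

outputs = output_ta.stack()          # [time_steps, batch_size, depth]
```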
def raw_rnn(cell, loop_fn, parallel_iterations=None, swap_memory=False, scope=None):
'Creates an `RNN` specified by RNNCell `cell` and loop function `loop_fn`.\n\n **NOTE: This method is still in testing, and the API may change.**\n\n This function is a more primitive version of `dynamic_rnn` that provides\n more direct access to the inputs each iteration. It also provides more\n control over when to start and finish reading the sequence, and\n what to emit for the output.\n\n For example, it can be used to implement the dynamic decoder of a seq2seq\n model.\n\n Instead of working with `Tensor` objects, most operations work with\n `TensorArray` objects directly.\n\n The operation of `raw_rnn`, in pseudo-code, is basically the following:\n\n ```python\n time = tf.constant(0, dtype=tf.int32)\n (finished, next_input, initial_state, _, loop_state) = loop_fn(\n time=time, cell_output=None, cell_state=None, loop_state=None)\n emit_ta = TensorArray(dynamic_size=True, dtype=initial_state.dtype)\n state = initial_state\n while not all(finished):\n (output, cell_state) = cell(next_input, state)\n (next_finished, next_input, next_state, emit, loop_state) = loop_fn(\n time=time + 1, cell_output=output, cell_state=cell_state,\n loop_state=loop_state)\n # Emit zeros and copy forward state for minibatch entries that are finished.\n state = tf.where(finished, state, next_state)\n emit = tf.where(finished, tf.zeros_like(emit), emit)\n emit_ta = emit_ta.write(time, emit)\n # If any new minibatch entries are marked as finished, mark these.\n finished = tf.logical_or(finished, next_finished)\n time += 1\n return (emit_ta, state, loop_state)\n ```\n\n with the additional properties that output and state may be (possibly nested)\n tuples, as determined by `cell.output_size` and `cell.state_size`, and\n as a result the final `state` and `emit_ta` may themselves be tuples.\n\n A simple implementation of `dynamic_rnn` via `raw_rnn` looks like this:\n\n ```python\n inputs = tf.placeholder(shape=(max_time, batch_size, input_depth),\n dtype=tf.float32)\n sequence_length = tf.placeholder(shape=(batch_size,), dtype=tf.int32)\n inputs_ta = tf.TensorArray(dtype=tf.float32, size=max_time)\n inputs_ta = inputs_ta.unstack(inputs)\n\n cell = tf.contrib.rnn.LSTMCell(num_units)\n\n def loop_fn(time, cell_output, cell_state, loop_state):\n emit_output = cell_output # == None for time == 0\n if cell_output is None: # time == 0\n next_cell_state = cell.zero_state(batch_size, tf.float32)\n else:\n next_cell_state = cell_state\n elements_finished = (time >= sequence_length)\n finished = tf.reduce_all(elements_finished)\n next_input = tf.cond(\n finished,\n lambda: tf.zeros([batch_size, input_depth], dtype=tf.float32),\n lambda: inputs_ta.read(time))\n next_loop_state = None\n return (elements_finished, next_input, next_cell_state,\n emit_output, next_loop_state)\n\n outputs_ta, final_state, _ = raw_rnn(cell, loop_fn)\n outputs = outputs_ta.stack()\n ```\n\n Args:\n cell: An instance of RNNCell.\n loop_fn: A callable that takes inputs\n `(time, cell_output, cell_state, loop_state)`\n and returns the tuple\n `(finished, next_input, next_cell_state, emit_output, next_loop_state)`.\n Here `time` is an int32 scalar `Tensor`, `cell_output` is a\n `Tensor` or (possibly nested) tuple of tensors as determined by\n `cell.output_size`, and `cell_state` is a `Tensor`\n or (possibly nested) tuple of tensors, as determined by the `loop_fn`\n on its first call (and should match `cell.state_size`).\n The outputs are: `finished`, a boolean `Tensor` of\n shape `[batch_size]`, `next_input`: the next input to feed to `cell`,\n `next_cell_state`: the next state to feed to `cell`,\n and `emit_output`: the output to store for this iteration.\n\n Note that `emit_output` should be a `Tensor` or (possibly nested)\n tuple of tensors with shapes and structure matching `cell.output_size`\n and `cell_output` above. The parameter `cell_state` and output\n `next_cell_state` may be either a single or (possibly nested) tuple\n of tensors. The parameter `loop_state` and\n output `next_loop_state` may be either a single or (possibly nested) tuple\n of `Tensor` and `TensorArray` objects. This last parameter\n may be ignored by `loop_fn` and the return value may be `None`. If it\n is not `None`, then the `loop_state` will be propagated through the RNN\n loop, for use purely by `loop_fn` to keep track of its own state.\n The `next_loop_state` parameter returned may be `None`.\n\n The first call to `loop_fn` will be `time = 0`, `cell_output = None`,\n `cell_state = None`, and `loop_state = None`. For this call:\n The `next_cell_state` value should be the value with which to initialize\n the cell\'s state. It may be a final state from a previous RNN or it\n may be the output of `cell.zero_state()`. It should be a\n (possibly nested) tuple structure of tensors.\n If `cell.state_size` is an integer, this must be\n a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`.\n If `cell.state_size` is a `TensorShape`, this must be a `Tensor` of\n appropriate type and shape `[batch_size] + cell.state_size`.\n If `cell.state_size` is a (possibly nested) tuple of ints or\n `TensorShape`, this will be a tuple having the corresponding shapes.\n The `emit_output` value may be either `None` or a (possibly nested)\n tuple structure of tensors, e.g.,\n `(tf.zeros(shape_0, dtype=dtype_0), tf.zeros(shape_1, dtype=dtype_1))`.\n If this first `emit_output` return value is `None`,\n then the `emit_ta` result of `raw_rnn` will have the same structure and\n dtypes as `cell.output_size`. Otherwise `emit_ta` will have the same\n structure, shapes (prepended with a `batch_size` dimension), and dtypes\n as `emit_output`. The actual values returned for `emit_output` at this\n initializing call are ignored. Note, this emit structure must be\n consistent across all time steps.\n\n parallel_iterations: (Default: 32). The number of iterations to run in\n parallel. Those operations which do not have any temporal dependency\n and can be run in parallel, will be. This parameter trades off\n time for space. Values >> 1 use more memory but take less time,\n while smaller values use less memory but computations take longer.\n swap_memory: Transparently swap the tensors produced in forward inference\n but needed for back prop from GPU to CPU. This allows training RNNs\n which would typically not fit on a single GPU, with very minimal (or no)\n performance penalty.\n scope: VariableScope for the created subgraph; defaults to "rnn".\n\n Returns:\n A tuple `(emit_ta, final_state, final_loop_state)` where:\n\n `emit_ta`: The RNN output `TensorArray`.\n If `loop_fn` returns a (possibly nested) set of Tensors for\n `emit_output` during initialization, (inputs `time = 0`,\n `cell_output = None`, and `loop_state = None`), then `emit_ta` will\n have the same structure, dtypes, and shapes as `emit_output` instead.\n If `loop_fn` returns `emit_output = None` during this call,\n the structure of `cell.output_size` is used:\n If `cell.output_size` is a (possibly nested) tuple of integers\n or `TensorShape` objects, then `emit_ta` will be a tuple having the\n same structure as `cell.output_size`, containing TensorArrays whose\n elements\' shapes correspond to the shape data in `cell.output_size`.\n\n `final_state`: The final cell state. If `cell.state_size` is an int, this\n will be shaped `[batch_size, cell.state_size]`. If it is a\n `TensorShape`, this will be shaped `[batch_size] + cell.state_size`.\n If it is a (possibly nested) tuple of ints or `TensorShape`, this will\n be a tuple having the corresponding shapes.\n\n `final_loop_state`: The final loop state as returned by `loop_fn`.\n\n Raises:\n TypeError: If `cell` is not an instance of RNNCell, or `loop_fn` is not\n a `callable`.\n '
if (not isinstance(cell, rnn_cell_impl._RNNCell)):
raise TypeError('cell must be an instance of RNNCell')
if (not callable(loop_fn)):
raise TypeError('loop_fn must be a callable')
parallel_iterations = (parallel_iterations or 32)
with vs.variable_scope((scope or 'rnn')) as varscope:
if (varscope.caching_device is None):
varscope.set_caching_device((lambda op: op.device))
time = constant_op.constant(0, dtype=dtypes.int32)
(elements_finished, next_input, initial_state, emit_structure, init_loop_state) = loop_fn(time, None, None, None)
flat_input = nest.flatten(next_input)
loop_state = (init_loop_state if (init_loop_state is not None) else constant_op.constant(0, dtype=dtypes.int32))
input_shape = [input_.get_shape() for input_ in flat_input]
static_batch_size = input_shape[0][0]
for input_shape_i in input_shape:
static_batch_size.merge_with(input_shape_i[0])
batch_size = static_batch_size.value
if (batch_size is None):
batch_size = array_ops.shape(flat_input[0])[0]
nest.assert_same_structure(initial_state, cell.state_size)
state = initial_state
flat_state = nest.flatten(state)
flat_state = [ops.convert_to_tensor(s) for s in flat_state]
state = nest.pack_sequence_as(structure=state, flat_sequence=flat_state)
if (emit_structure is not None):
flat_emit_structure = nest.flatten(emit_structure)
flat_emit_size = [emit.get_shape() for emit in flat_emit_structure]
flat_emit_dtypes = [emit.dtype for emit in flat_emit_structure]
else:
emit_structure = cell.output_size
flat_emit_size = nest.flatten(emit_structure)
flat_emit_dtypes = ([flat_state[0].dtype] * len(flat_emit_size))
flat_emit_ta = [tensor_array_ops.TensorArray(dtype=dtype_i, dynamic_size=True, size=0, name=('rnn_output_%d' % i)) for (i, dtype_i) in enumerate(flat_emit_dtypes)]
emit_ta = nest.pack_sequence_as(structure=emit_structure, flat_sequence=flat_emit_ta)
flat_zero_emit = [array_ops.zeros(_state_size_with_prefix(size_i, prefix=[batch_size]), dtype_i) for (size_i, dtype_i) in zip(flat_emit_size, flat_emit_dtypes)]
zero_emit = nest.pack_sequence_as(structure=emit_structure, flat_sequence=flat_zero_emit)
def condition(unused_time, elements_finished, *_):
return math_ops.logical_not(math_ops.reduce_all(elements_finished))
def body(time, elements_finished, current_input, emit_ta, state, loop_state):
'Internal while loop body for raw_rnn.\n\n Args:\n time: time scalar.\n elements_finished: batch-size vector.\n current_input: possibly nested tuple of input tensors.\n emit_ta: possibly nested tuple of output TensorArrays.\n state: possibly nested tuple of state tensors.\n loop_state: possibly nested tuple of loop state tensors.\n\n Returns:\n Tuple having the same size as Args but with updated values.\n '
(next_output, cell_state) = cell(current_input, state)
nest.assert_same_structure(state, cell_state)
nest.assert_same_structure(cell.output_size, next_output)
next_time = (time + 1)
(next_finished, next_input, next_state, emit_output, next_loop_state) = loop_fn(next_time, next_output, cell_state, loop_state)
nest.assert_same_structure(state, next_state)
nest.assert_same_structure(current_input, next_input)
nest.assert_same_structure(emit_ta, emit_output)
loop_state = (loop_state if (next_loop_state is None) else next_loop_state)
def _copy_some_through(current, candidate):
'Copy some tensors through via array_ops.where.'
current_flat = nest.flatten(current)
candidate_flat = nest.flatten(candidate)
result_flat = [_on_device((lambda : array_ops.where(elements_finished, current_i, candidate_i)), device=candidate_i.op.device) for (current_i, candidate_i) in zip(current_flat, candidate_flat)]
return nest.pack_sequence_as(structure=current, flat_sequence=result_flat)
emit_output = _copy_some_through(zero_emit, emit_output)
next_state = _copy_some_through(state, next_state)
emit_output_flat = nest.flatten(emit_output)
emit_ta_flat = nest.flatten(emit_ta)
elements_finished = math_ops.logical_or(elements_finished, next_finished)
emit_ta_flat = [ta.write(time, emit) for (ta, emit) in zip(emit_ta_flat, emit_output_flat)]
emit_ta = nest.pack_sequence_as(structure=emit_structure, flat_sequence=emit_ta_flat)
return (next_time, elements_finished, next_input, emit_ta, next_state, loop_state)
returned = control_flow_ops.while_loop(condition, body, loop_vars=[time, elements_finished, next_input, emit_ta, state, loop_state], parallel_iterations=parallel_iterations, swap_memory=swap_memory)
(emit_ta, final_state, final_loop_state) = returned[(- 3):]
if (init_loop_state is None):
final_loop_state = None
return (emit_ta, final_state, final_loop_state) | -1,963,316,333,818,678,500 | Creates an `RNN` specified by RNNCell `cell` and loop function `loop_fn`.
**NOTE: This method is still in testing, and the API may change.**
This function is a more primitive version of `dynamic_rnn` that provides
more direct access to the inputs each iteration. It also provides more
control over when to start and finish reading the sequence, and
what to emit for the output.
For example, it can be used to implement the dynamic decoder of a seq2seq
model.
Instead of working with `Tensor` objects, most operations work with
`TensorArray` objects directly.
The operation of `raw_rnn`, in pseudo-code, is basically the following:
```python
time = tf.constant(0, dtype=tf.int32)
(finished, next_input, initial_state, _, loop_state) = loop_fn(
time=time, cell_output=None, cell_state=None, loop_state=None)
emit_ta = TensorArray(dynamic_size=True, dtype=initial_state.dtype)
state = initial_state
while not all(finished):
(output, cell_state) = cell(next_input, state)
(next_finished, next_input, next_state, emit, loop_state) = loop_fn(
time=time + 1, cell_output=output, cell_state=cell_state,
loop_state=loop_state)
# Emit zeros and copy forward state for minibatch entries that are finished.
state = tf.where(finished, state, next_state)
emit = tf.where(finished, tf.zeros_like(emit), emit)
emit_ta = emit_ta.write(time, emit)
# If any new minibatch entries are marked as finished, mark these.
finished = tf.logical_or(finished, next_finished)
time += 1
return (emit_ta, state, loop_state)
```
with the additional properties that output and state may be (possibly nested)
tuples, as determined by `cell.output_size` and `cell.state_size`, and
as a result the final `state` and `emit_ta` may themselves be tuples.
A simple implementation of `dynamic_rnn` via `raw_rnn` looks like this:
```python
inputs = tf.placeholder(shape=(max_time, batch_size, input_depth),
dtype=tf.float32)
sequence_length = tf.placeholder(shape=(batch_size,), dtype=tf.int32)
inputs_ta = tf.TensorArray(dtype=tf.float32, size=max_time)
inputs_ta = inputs_ta.unstack(inputs)
cell = tf.contrib.rnn.LSTMCell(num_units)
def loop_fn(time, cell_output, cell_state, loop_state):
emit_output = cell_output # == None for time == 0
if cell_output is None: # time == 0
next_cell_state = cell.zero_state(batch_size, tf.float32)
else:
next_cell_state = cell_state
elements_finished = (time >= sequence_length)
finished = tf.reduce_all(elements_finished)
next_input = tf.cond(
finished,
lambda: tf.zeros([batch_size, input_depth], dtype=tf.float32),
lambda: inputs_ta.read(time))
next_loop_state = None
return (elements_finished, next_input, next_cell_state,
emit_output, next_loop_state)
outputs_ta, final_state, _ = raw_rnn(cell, loop_fn)
outputs = outputs_ta.stack()
```
Args:
cell: An instance of RNNCell.
loop_fn: A callable that takes inputs
`(time, cell_output, cell_state, loop_state)`
and returns the tuple
`(finished, next_input, next_cell_state, emit_output, next_loop_state)`.
Here `time` is an int32 scalar `Tensor`, `cell_output` is a
`Tensor` or (possibly nested) tuple of tensors as determined by
`cell.output_size`, and `cell_state` is a `Tensor`
or (possibly nested) tuple of tensors, as determined by the `loop_fn`
on its first call (and should match `cell.state_size`).
The outputs are: `finished`, a boolean `Tensor` of
shape `[batch_size]`, `next_input`: the next input to feed to `cell`,
`next_cell_state`: the next state to feed to `cell`,
and `emit_output`: the output to store for this iteration.
Note that `emit_output` should be a `Tensor` or (possibly nested)
tuple of tensors with shapes and structure matching `cell.output_size`
and `cell_output` above. The parameter `cell_state` and output
`next_cell_state` may be either a single or (possibly nested) tuple
of tensors. The parameter `loop_state` and
output `next_loop_state` may be either a single or (possibly nested) tuple
of `Tensor` and `TensorArray` objects. This last parameter
may be ignored by `loop_fn` and the return value may be `None`. If it
is not `None`, then the `loop_state` will be propagated through the RNN
loop, for use purely by `loop_fn` to keep track of its own state.
The `next_loop_state` parameter returned may be `None`.
The first call to `loop_fn` will be `time = 0`, `cell_output = None`,
`cell_state = None`, and `loop_state = None`. For this call:
The `next_cell_state` value should be the value with which to initialize
the cell's state. It may be a final state from a previous RNN or it
may be the output of `cell.zero_state()`. It should be a
(possibly nested) tuple structure of tensors.
If `cell.state_size` is an integer, this must be
a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`.
If `cell.state_size` is a `TensorShape`, this must be a `Tensor` of
appropriate type and shape `[batch_size] + cell.state_size`.
If `cell.state_size` is a (possibly nested) tuple of ints or
`TensorShape`, this will be a tuple having the corresponding shapes.
The `emit_output` value may be either `None` or a (possibly nested)
tuple structure of tensors, e.g.,
`(tf.zeros(shape_0, dtype=dtype_0), tf.zeros(shape_1, dtype=dtype_1))`.
If this first `emit_output` return value is `None`,
then the `emit_ta` result of `raw_rnn` will have the same structure and
dtypes as `cell.output_size`. Otherwise `emit_ta` will have the same
structure, shapes (prepended with a `batch_size` dimension), and dtypes
as `emit_output`. The actual values returned for `emit_output` at this
initializing call are ignored. Note, this emit structure must be
consistent across all time steps.
parallel_iterations: (Default: 32). The number of iterations to run in
parallel. Those operations which do not have any temporal dependency
and can be run in parallel, will be. This parameter trades off
time for space. Values >> 1 use more memory but take less time,
while smaller values use less memory but computations take longer.
swap_memory: Transparently swap the tensors produced in forward inference
but needed for back prop from GPU to CPU. This allows training RNNs
which would typically not fit on a single GPU, with very minimal (or no)
performance penalty.
scope: VariableScope for the created subgraph; defaults to "rnn".
Returns:
A tuple `(emit_ta, final_state, final_loop_state)` where:
`emit_ta`: The RNN output `TensorArray`.
If `loop_fn` returns a (possibly nested) set of Tensors for
`emit_output` during initialization, (inputs `time = 0`,
`cell_output = None`, and `loop_state = None`), then `emit_ta` will
have the same structure, dtypes, and shapes as `emit_output` instead.
If `loop_fn` returns `emit_output = None` during this call,
the structure of `cell.output_size` is used:
If `cell.output_size` is a (possibly nested) tuple of integers
or `TensorShape` objects, then `emit_ta` will be a tuple having the
same structure as `cell.output_size`, containing TensorArrays whose
elements' shapes correspond to the shape data in `cell.output_size`.
`final_state`: The final cell state. If `cell.state_size` is an int, this
will be shaped `[batch_size, cell.state_size]`. If it is a
`TensorShape`, this will be shaped `[batch_size] + cell.state_size`.
If it is a (possibly nested) tuple of ints or `TensorShape`, this will
be a tuple having the corresponding shapes.
`final_loop_state`: The final loop state as returned by `loop_fn`.
Raises:
TypeError: If `cell` is not an instance of RNNCell, or `loop_fn` is not
a `callable`. | tensorflow/python/ops/rnn.py | raw_rnn | gameover27/hiptensorflow | python | def raw_rnn(cell, loop_fn, parallel_iterations=None, swap_memory=False, scope=None):
'Creates an `RNN` specified by RNNCell `cell` and loop function `loop_fn`.\n\n **NOTE: This method is still in testing, and the API may change.**\n\n This function is a more primitive version of `dynamic_rnn` that provides\n more direct access to the inputs each iteration. It also provides more\n control over when to start and finish reading the sequence, and\n what to emit for the output.\n\n For example, it can be used to implement the dynamic decoder of a seq2seq\n model.\n\n Instead of working with `Tensor` objects, most operations work with\n `TensorArray` objects directly.\n\n The operation of `raw_rnn`, in pseudo-code, is basically the following:\n\n ```python\n time = tf.constant(0, dtype=tf.int32)\n (finished, next_input, initial_state, _, loop_state) = loop_fn(\n time=time, cell_output=None, cell_state=None, loop_state=None)\n emit_ta = TensorArray(dynamic_size=True, dtype=initial_state.dtype)\n state = initial_state\n while not all(finished):\n (output, cell_state) = cell(next_input, state)\n (next_finished, next_input, next_state, emit, loop_state) = loop_fn(\n time=time + 1, cell_output=output, cell_state=cell_state,\n loop_state=loop_state)\n # Emit zeros and copy forward state for minibatch entries that are finished.\n state = tf.where(finished, state, next_state)\n emit = tf.where(finished, tf.zeros_like(emit), emit)\n emit_ta = emit_ta.write(time, emit)\n # If any new minibatch entries are marked as finished, mark these.\n finished = tf.logical_or(finished, next_finished)\n time += 1\n return (emit_ta, state, loop_state)\n ```\n\n with the additional properties that output and state may be (possibly nested)\n tuples, as determined by `cell.output_size` and `cell.state_size`, and\n as a result the final `state` and `emit_ta` may themselves be tuples.\n\n A simple implementation of `dynamic_rnn` via `raw_rnn` looks like this:\n\n ```python\n inputs = tf.placeholder(shape=(max_time, batch_size, input_depth),\n dtype=tf.float32)\n sequence_length = tf.placeholder(shape=(batch_size,), dtype=tf.int32)\n inputs_ta = tf.TensorArray(dtype=tf.float32, size=max_time)\n inputs_ta = inputs_ta.unstack(inputs)\n\n cell = tf.contrib.rnn.LSTMCell(num_units)\n\n def loop_fn(time, cell_output, cell_state, loop_state):\n emit_output = cell_output # == None for time == 0\n if cell_output is None: # time == 0\n next_cell_state = cell.zero_state(batch_size, tf.float32)\n else:\n next_cell_state = cell_state\n elements_finished = (time >= sequence_length)\n finished = tf.reduce_all(elements_finished)\n next_input = tf.cond(\n finished,\n lambda: tf.zeros([batch_size, input_depth], dtype=tf.float32),\n lambda: inputs_ta.read(time))\n next_loop_state = None\n return (elements_finished, next_input, next_cell_state,\n emit_output, next_loop_state)\n\n outputs_ta, final_state, _ = raw_rnn(cell, loop_fn)\n outputs = outputs_ta.stack()\n ```\n\n Args:\n cell: An instance of RNNCell.\n loop_fn: A callable that takes inputs\n `(time, cell_output, cell_state, loop_state)`\n and returns the tuple\n `(finished, next_input, next_cell_state, emit_output, next_loop_state)`.\n Here `time` is an int32 scalar `Tensor`, `cell_output` is a\n `Tensor` or (possibly nested) tuple of tensors as determined by\n `cell.output_size`, and `cell_state` is a `Tensor`\n or (possibly nested) tuple of tensors, as determined by the `loop_fn`\n on its first call (and should match `cell.state_size`).\n The outputs are: `finished`, a boolean `Tensor` of\n shape `[batch_size]`, `next_input`: the next input to feed to `cell`,\n 
`next_cell_state`: the next state to feed to `cell`,\n and `emit_output`: the output to store for this iteration.\n\n Note that `emit_output` should be a `Tensor` or (possibly nested)\n tuple of tensors with shapes and structure matching `cell.output_size`\n and `cell_output` above. The parameter `cell_state` and output\n `next_cell_state` may be either a single or (possibly nested) tuple\n of tensors. The parameter `loop_state` and\n output `next_loop_state` may be either a single or (possibly nested) tuple\n of `Tensor` and `TensorArray` objects. This last parameter\n may be ignored by `loop_fn` and the return value may be `None`. If it\n is not `None`, then the `loop_state` will be propagated through the RNN\n loop, for use purely by `loop_fn` to keep track of its own state.\n The `next_loop_state` parameter returned may be `None`.\n\n The first call to `loop_fn` will be `time = 0`, `cell_output = None`,\n `cell_state = None`, and `loop_state = None`. For this call:\n The `next_cell_state` value should be the value with which to initialize\n the cell\'s state. It may be a final state from a previous RNN or it\n may be the output of `cell.zero_state()`. It should be a\n (possibly nested) tuple structure of tensors.\n If `cell.state_size` is an integer, this must be\n a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`.\n If `cell.state_size` is a `TensorShape`, this must be a `Tensor` of\n appropriate type and shape `[batch_size] + cell.state_size`.\n If `cell.state_size` is a (possibly nested) tuple of ints or\n `TensorShape`, this will be a tuple having the corresponding shapes.\n The `emit_output` value may be either `None` or a (possibly nested)\n tuple structure of tensors, e.g.,\n `(tf.zeros(shape_0, dtype=dtype_0), tf.zeros(shape_1, dtype=dtype_1))`.\n If this first `emit_output` return value is `None`,\n then the `emit_ta` result of `raw_rnn` will have the same structure and\n dtypes as `cell.output_size`. Otherwise `emit_ta` will have the same\n structure, shapes (prepended with a `batch_size` dimension), and dtypes\n as `emit_output`. The actual values returned for `emit_output` at this\n initializing call are ignored. Note, this emit structure must be\n consistent across all time steps.\n\n parallel_iterations: (Default: 32). The number of iterations to run in\n parallel. Those operations which do not have any temporal dependency\n and can be run in parallel, will be. This parameter trades off\n time for space. Values >> 1 use more memory but take less time,\n while smaller values use less memory but computations take longer.\n swap_memory: Transparently swap the tensors produced in forward inference\n but needed for back prop from GPU to CPU. 
This allows training RNNs\n which would typically not fit on a single GPU, with very minimal (or no)\n performance penalty.\n scope: VariableScope for the created subgraph; defaults to "rnn".\n\n Returns:\n A tuple `(emit_ta, final_state, final_loop_state)` where:\n\n `emit_ta`: The RNN output `TensorArray`.\n If `loop_fn` returns a (possibly nested) set of Tensors for\n `emit_output` during initialization, (inputs `time = 0`,\n `cell_output = None`, and `loop_state = None`), then `emit_ta` will\n have the same structure, dtypes, and shapes as `emit_output` instead.\n If `loop_fn` returns `emit_output = None` during this call,\n the structure of `cell.output_size` is used:\n If `cell.output_size` is a (possibly nested) tuple of integers\n or `TensorShape` objects, then `emit_ta` will be a tuple having the\n same structure as `cell.output_size`, containing TensorArrays whose\n elements\' shapes correspond to the shape data in `cell.output_size`.\n\n `final_state`: The final cell state. If `cell.state_size` is an int, this\n will be shaped `[batch_size, cell.state_size]`. If it is a\n `TensorShape`, this will be shaped `[batch_size] + cell.state_size`.\n If it is a (possibly nested) tuple of ints or `TensorShape`, this will\n be a tuple having the corresponding shapes.\n\n `final_loop_state`: The final loop state as returned by `loop_fn`.\n\n Raises:\n TypeError: If `cell` is not an instance of RNNCell, or `loop_fn` is not\n a `callable`.\n '
if (not isinstance(cell, rnn_cell_impl._RNNCell)):
raise TypeError('cell must be an instance of RNNCell')
if (not callable(loop_fn)):
raise TypeError('loop_fn must be a callable')
parallel_iterations = (parallel_iterations or 32)
with vs.variable_scope((scope or 'rnn')) as varscope:
if (varscope.caching_device is None):
varscope.set_caching_device((lambda op: op.device))
time = constant_op.constant(0, dtype=dtypes.int32)
(elements_finished, next_input, initial_state, emit_structure, init_loop_state) = loop_fn(time, None, None, None)
flat_input = nest.flatten(next_input)
loop_state = (init_loop_state if (init_loop_state is not None) else constant_op.constant(0, dtype=dtypes.int32))
input_shape = [input_.get_shape() for input_ in flat_input]
static_batch_size = input_shape[0][0]
for input_shape_i in input_shape:
static_batch_size.merge_with(input_shape_i[0])
batch_size = static_batch_size.value
if (batch_size is None):
batch_size = array_ops.shape(flat_input[0])[0]
nest.assert_same_structure(initial_state, cell.state_size)
state = initial_state
flat_state = nest.flatten(state)
flat_state = [ops.convert_to_tensor(s) for s in flat_state]
state = nest.pack_sequence_as(structure=state, flat_sequence=flat_state)
if (emit_structure is not None):
flat_emit_structure = nest.flatten(emit_structure)
flat_emit_size = [emit.get_shape() for emit in flat_emit_structure]
flat_emit_dtypes = [emit.dtype for emit in flat_emit_structure]
else:
emit_structure = cell.output_size
flat_emit_size = nest.flatten(emit_structure)
flat_emit_dtypes = ([flat_state[0].dtype] * len(flat_emit_size))
flat_emit_ta = [tensor_array_ops.TensorArray(dtype=dtype_i, dynamic_size=True, size=0, name=('rnn_output_%d' % i)) for (i, dtype_i) in enumerate(flat_emit_dtypes)]
emit_ta = nest.pack_sequence_as(structure=emit_structure, flat_sequence=flat_emit_ta)
flat_zero_emit = [array_ops.zeros(_state_size_with_prefix(size_i, prefix=[batch_size]), dtype_i) for (size_i, dtype_i) in zip(flat_emit_size, flat_emit_dtypes)]
zero_emit = nest.pack_sequence_as(structure=emit_structure, flat_sequence=flat_zero_emit)
def condition(unused_time, elements_finished, *_):
return math_ops.logical_not(math_ops.reduce_all(elements_finished))
def body(time, elements_finished, current_input, emit_ta, state, loop_state):
'Internal while loop body for raw_rnn.\n\n Args:\n time: time scalar.\n elements_finished: batch-size vector.\n current_input: possibly nested tuple of input tensors.\n emit_ta: possibly nested tuple of output TensorArrays.\n state: possibly nested tuple of state tensors.\n loop_state: possibly nested tuple of loop state tensors.\n\n Returns:\n Tuple having the same size as Args but with updated values.\n '
(next_output, cell_state) = cell(current_input, state)
nest.assert_same_structure(state, cell_state)
nest.assert_same_structure(cell.output_size, next_output)
next_time = (time + 1)
(next_finished, next_input, next_state, emit_output, next_loop_state) = loop_fn(next_time, next_output, cell_state, loop_state)
nest.assert_same_structure(state, next_state)
nest.assert_same_structure(current_input, next_input)
nest.assert_same_structure(emit_ta, emit_output)
loop_state = (loop_state if (next_loop_state is None) else next_loop_state)
def _copy_some_through(current, candidate):
'Copy some tensors through via array_ops.where.'
current_flat = nest.flatten(current)
candidate_flat = nest.flatten(candidate)
result_flat = [_on_device((lambda : array_ops.where(elements_finished, current_i, candidate_i)), device=candidate_i.op.device) for (current_i, candidate_i) in zip(current_flat, candidate_flat)]
return nest.pack_sequence_as(structure=current, flat_sequence=result_flat)
emit_output = _copy_some_through(zero_emit, emit_output)
next_state = _copy_some_through(state, next_state)
emit_output_flat = nest.flatten(emit_output)
emit_ta_flat = nest.flatten(emit_ta)
elements_finished = math_ops.logical_or(elements_finished, next_finished)
emit_ta_flat = [ta.write(time, emit) for (ta, emit) in zip(emit_ta_flat, emit_output_flat)]
emit_ta = nest.pack_sequence_as(structure=emit_structure, flat_sequence=emit_ta_flat)
return (next_time, elements_finished, next_input, emit_ta, next_state, loop_state)
returned = control_flow_ops.while_loop(condition, body, loop_vars=[time, elements_finished, next_input, emit_ta, state, loop_state], parallel_iterations=parallel_iterations, swap_memory=swap_memory)
(emit_ta, final_state, final_loop_state) = returned[(- 3):]
if (init_loop_state is None):
final_loop_state = None
return (emit_ta, final_state, final_loop_state) |
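The raw_rnn row above documents the loop_fn calling contract and sketches the driver loop in pseudo-code. The following NumPy-only sketch emulates that loop outside TensorFlow so the contract is easy to trace by hand; the toy cell, weight shapes, and sequence lengths are illustrative assumptions, not values from the row above.

```python
# Minimal NumPy emulation of the raw_rnn driver loop described in the
# docstring above. The cell and loop_fn below are toy stand-ins chosen for
# illustration; only the calling contract mirrors raw_rnn.
import numpy as np

batch_size, input_depth, num_units, max_time = 3, 4, 5, 6
rng = np.random.default_rng(0)
inputs = rng.normal(size=(max_time, batch_size, input_depth))
sequence_length = np.array([6, 4, 2])
W_x = rng.normal(size=(input_depth, num_units))
W_h = rng.normal(size=(num_units, num_units))

def cell(x, h):                      # toy RNN cell: output == new state
    h_new = np.tanh(x @ W_x + h @ W_h)
    return h_new, h_new

def loop_fn(time, cell_output, cell_state, loop_state):
    if cell_output is None:          # time == 0: initialization call
        next_state = np.zeros((batch_size, num_units))
        emit = None
    else:
        next_state = cell_state
        emit = cell_output
    finished = time >= sequence_length                  # [batch_size] bools
    next_input = np.where(finished[:, None], 0.0,
                          inputs[min(time, max_time - 1)])
    return finished, next_input, next_state, emit, None

time = 0
finished, next_input, state, _, loop_state = loop_fn(time, None, None, None)
emits = []
while not finished.all():
    output, cell_state = cell(next_input, state)
    next_finished, next_input, next_state, emit, loop_state = loop_fn(
        time + 1, output, cell_state, loop_state)
    # Copy state through and emit zeros for already-finished entries.
    state = np.where(finished[:, None], state, next_state)
    emits.append(np.where(finished[:, None], 0.0, emit))
    finished = np.logical_or(finished, next_finished)
    time += 1

outputs = np.stack(emits)            # [max(sequence_length), batch, units]
print(outputs.shape)                 # (6, 3, 5)
```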
def _maybe_copy_some_through():
'Run RNN step. Pass through either no or some past state.'
(new_output, new_state) = call_cell()
nest.assert_same_structure(state, new_state)
flat_new_state = nest.flatten(new_state)
flat_new_output = nest.flatten(new_output)
return control_flow_ops.cond((time < min_sequence_length), (lambda : (flat_new_output + flat_new_state)), (lambda : _copy_some_through(flat_new_output, flat_new_state))) | -2,520,382,574,250,251,000 | Run RNN step. Pass through either no or some past state. | tensorflow/python/ops/rnn.py | _maybe_copy_some_through | gameover27/hiptensorflow | python | def _maybe_copy_some_through():
(new_output, new_state) = call_cell()
nest.assert_same_structure(state, new_state)
flat_new_state = nest.flatten(new_state)
flat_new_output = nest.flatten(new_output)
return control_flow_ops.cond((time < min_sequence_length), (lambda : (flat_new_output + flat_new_state)), (lambda : _copy_some_through(flat_new_output, flat_new_state))) |
def _time_step(time, output_ta_t, state):
'Take a time step of the dynamic RNN.\n\n Args:\n time: int32 scalar Tensor.\n output_ta_t: List of `TensorArray`s that represent the output.\n state: nested tuple of vector tensors that represent the state.\n\n Returns:\n The tuple (time + 1, output_ta_t with updated flow, new_state).\n '
input_t = tuple((ta.read(time) for ta in input_ta))
for (input_, shape) in zip(input_t, inputs_got_shape):
input_.set_shape(shape[1:])
input_t = nest.pack_sequence_as(structure=inputs, flat_sequence=input_t)
call_cell = (lambda : cell(input_t, state))
if (sequence_length is not None):
(output, new_state) = _rnn_step(time=time, sequence_length=sequence_length, min_sequence_length=min_sequence_length, max_sequence_length=max_sequence_length, zero_output=zero_output, state=state, call_cell=call_cell, state_size=state_size, skip_conditionals=True)
else:
(output, new_state) = call_cell()
output = nest.flatten(output)
output_ta_t = tuple((ta.write(time, out) for (ta, out) in zip(output_ta_t, output)))
return ((time + 1), output_ta_t, new_state) | -4,486,669,498,042,699,000 | Take a time step of the dynamic RNN.
Args:
time: int32 scalar Tensor.
output_ta_t: List of `TensorArray`s that represent the output.
state: nested tuple of vector tensors that represent the state.
Returns:
The tuple (time + 1, output_ta_t with updated flow, new_state). | tensorflow/python/ops/rnn.py | _time_step | gameover27/hiptensorflow | python | def _time_step(time, output_ta_t, state):
'Take a time step of the dynamic RNN.\n\n Args:\n time: int32 scalar Tensor.\n output_ta_t: List of `TensorArray`s that represent the output.\n state: nested tuple of vector tensors that represent the state.\n\n Returns:\n The tuple (time + 1, output_ta_t with updated flow, new_state).\n '
input_t = tuple((ta.read(time) for ta in input_ta))
for (input_, shape) in zip(input_t, inputs_got_shape):
input_.set_shape(shape[1:])
input_t = nest.pack_sequence_as(structure=inputs, flat_sequence=input_t)
call_cell = (lambda : cell(input_t, state))
if (sequence_length is not None):
(output, new_state) = _rnn_step(time=time, sequence_length=sequence_length, min_sequence_length=min_sequence_length, max_sequence_length=max_sequence_length, zero_output=zero_output, state=state, call_cell=call_cell, state_size=state_size, skip_conditionals=True)
else:
(output, new_state) = call_cell()
output = nest.flatten(output)
output_ta_t = tuple((ta.write(time, out) for (ta, out) in zip(output_ta_t, output)))
return ((time + 1), output_ta_t, new_state) |
def body(time, elements_finished, current_input, emit_ta, state, loop_state):
'Internal while loop body for raw_rnn.\n\n Args:\n time: time scalar.\n elements_finished: batch-size vector.\n current_input: possibly nested tuple of input tensors.\n emit_ta: possibly nested tuple of output TensorArrays.\n state: possibly nested tuple of state tensors.\n loop_state: possibly nested tuple of loop state tensors.\n\n Returns:\n Tuple having the same size as Args but with updated values.\n '
(next_output, cell_state) = cell(current_input, state)
nest.assert_same_structure(state, cell_state)
nest.assert_same_structure(cell.output_size, next_output)
next_time = (time + 1)
(next_finished, next_input, next_state, emit_output, next_loop_state) = loop_fn(next_time, next_output, cell_state, loop_state)
nest.assert_same_structure(state, next_state)
nest.assert_same_structure(current_input, next_input)
nest.assert_same_structure(emit_ta, emit_output)
loop_state = (loop_state if (next_loop_state is None) else next_loop_state)
def _copy_some_through(current, candidate):
'Copy some tensors through via array_ops.where.'
current_flat = nest.flatten(current)
candidate_flat = nest.flatten(candidate)
result_flat = [_on_device((lambda : array_ops.where(elements_finished, current_i, candidate_i)), device=candidate_i.op.device) for (current_i, candidate_i) in zip(current_flat, candidate_flat)]
return nest.pack_sequence_as(structure=current, flat_sequence=result_flat)
emit_output = _copy_some_through(zero_emit, emit_output)
next_state = _copy_some_through(state, next_state)
emit_output_flat = nest.flatten(emit_output)
emit_ta_flat = nest.flatten(emit_ta)
elements_finished = math_ops.logical_or(elements_finished, next_finished)
emit_ta_flat = [ta.write(time, emit) for (ta, emit) in zip(emit_ta_flat, emit_output_flat)]
emit_ta = nest.pack_sequence_as(structure=emit_structure, flat_sequence=emit_ta_flat)
return (next_time, elements_finished, next_input, emit_ta, next_state, loop_state) | 1,539,779,915,144,872,400 | Internal while loop body for raw_rnn.
Args:
time: time scalar.
elements_finished: batch-size vector.
current_input: possibly nested tuple of input tensors.
emit_ta: possibly nested tuple of output TensorArrays.
state: possibly nested tuple of state tensors.
loop_state: possibly nested tuple of loop state tensors.
Returns:
Tuple having the same size as Args but with updated values. | tensorflow/python/ops/rnn.py | body | gameover27/hiptensorflow | python | def body(time, elements_finished, current_input, emit_ta, state, loop_state):
'Internal while loop body for raw_rnn.\n\n Args:\n time: time scalar.\n elements_finished: batch-size vector.\n current_input: possibly nested tuple of input tensors.\n emit_ta: possibly nested tuple of output TensorArrays.\n state: possibly nested tuple of state tensors.\n loop_state: possibly nested tuple of loop state tensors.\n\n Returns:\n Tuple having the same size as Args but with updated values.\n '
(next_output, cell_state) = cell(current_input, state)
nest.assert_same_structure(state, cell_state)
nest.assert_same_structure(cell.output_size, next_output)
next_time = (time + 1)
(next_finished, next_input, next_state, emit_output, next_loop_state) = loop_fn(next_time, next_output, cell_state, loop_state)
nest.assert_same_structure(state, next_state)
nest.assert_same_structure(current_input, next_input)
nest.assert_same_structure(emit_ta, emit_output)
loop_state = (loop_state if (next_loop_state is None) else next_loop_state)
def _copy_some_through(current, candidate):
'Copy some tensors through via array_ops.where.'
current_flat = nest.flatten(current)
candidate_flat = nest.flatten(candidate)
result_flat = [_on_device((lambda : array_ops.where(elements_finished, current_i, candidate_i)), device=candidate_i.op.device) for (current_i, candidate_i) in zip(current_flat, candidate_flat)]
return nest.pack_sequence_as(structure=current, flat_sequence=result_flat)
emit_output = _copy_some_through(zero_emit, emit_output)
next_state = _copy_some_through(state, next_state)
emit_output_flat = nest.flatten(emit_output)
emit_ta_flat = nest.flatten(emit_ta)
elements_finished = math_ops.logical_or(elements_finished, next_finished)
emit_ta_flat = [ta.write(time, emit) for (ta, emit) in zip(emit_ta_flat, emit_output_flat)]
emit_ta = nest.pack_sequence_as(structure=emit_structure, flat_sequence=emit_ta_flat)
return (next_time, elements_finished, next_input, emit_ta, next_state, loop_state) |
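The while-loop body above repeatedly converts between nested tuples and flat lists with nest.flatten and nest.pack_sequence_as. A small pure-Python sketch of that round-trip for plain tuples, shown only to illustrate the semantics the loop relies on; the real tensorflow.python.util.nest utility also handles lists, dicts, and namedtuples.

```python
# Pure-Python sketch of the flatten / pack_sequence_as round-trip used by
# the loop body above (nested tuples only).
def flatten(structure):
    if isinstance(structure, tuple):
        return [leaf for item in structure for leaf in flatten(item)]
    return [structure]

def pack_sequence_as(structure, flat_sequence):
    it = iter(flat_sequence)
    def pack(s):
        if isinstance(s, tuple):
            return tuple(pack(item) for item in s)
        return next(it)
    return pack(structure)

state_structure = ("c", ("h1", "h2"))            # shape of cell.state_size
print(flatten(state_structure))                   # ['c', 'h1', 'h2']
print(pack_sequence_as(state_structure, [1, 2, 3]))  # (1, (2, 3))
```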
def _copy_some_through(current, candidate):
'Copy some tensors through via array_ops.where.'
current_flat = nest.flatten(current)
candidate_flat = nest.flatten(candidate)
result_flat = [_on_device((lambda : array_ops.where(elements_finished, current_i, candidate_i)), device=candidate_i.op.device) for (current_i, candidate_i) in zip(current_flat, candidate_flat)]
return nest.pack_sequence_as(structure=current, flat_sequence=result_flat) | 4,694,326,597,159,489,000 | Copy some tensors through via array_ops.where. | tensorflow/python/ops/rnn.py | _copy_some_through | gameover27/hiptensorflow | python | def _copy_some_through(current, candidate):
current_flat = nest.flatten(current)
candidate_flat = nest.flatten(candidate)
result_flat = [_on_device((lambda : array_ops.where(elements_finished, current_i, candidate_i)), device=candidate_i.op.device) for (current_i, candidate_i) in zip(current_flat, candidate_flat)]
return nest.pack_sequence_as(structure=current, flat_sequence=result_flat) |
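_copy_some_through above keeps already-finished batch entries on their current value and lets unfinished entries take the freshly computed candidate. The same selection expressed as a NumPy one-liner with made-up values:

```python
# NumPy equivalent of the per-entry pass-through, with made-up values.
import numpy as np

elements_finished = np.array([True, False, True])
current   = np.array([[1., 1.], [2., 2.], [3., 3.]])   # state carried forward
candidate = np.array([[9., 9.], [8., 8.], [7., 7.]])   # freshly computed state
# Finished rows keep `current`; unfinished rows take `candidate`.
print(np.where(elements_finished[:, None], current, candidate))
# [[1. 1.]
#  [8. 8.]
#  [3. 3.]]
```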
def extract_app(self, arch):
'\n\t\tReturn an `App` object from this TAB. You must specify the desired\n\t\tMCU architecture so the correct binary can be retrieved.\n\t\t'
binary_tarinfo = self.tab.getmember('{}.bin'.format(arch))
binary = self.tab.extractfile(binary_tarinfo).read()
tbfh = TBFHeader(binary)
if tbfh.is_valid():
name_or_params = tbfh.get_app_name()
if isinstance(name_or_params, str):
name = name_or_params
else:
start = name_or_params[0]
end = (start + name_or_params[1])
name = binary[start:end].decode('utf-8')
if (tbfh.get_app_size() < len(binary)):
raise TockLoaderException('Invalid TAB, the app binary is longer than its defined total_size')
return App(tbfh, None, name, binary)
else:
raise TockLoaderException('Invalid TBF found in app in TAB') | -8,530,720,299,191,283,000 | Return an `App` object from this TAB. You must specify the desired
MCU architecture so the correct binary can be retrieved. | tockloader/tab.py | extract_app | torfmaster/tockloader | python | def extract_app(self, arch):
'\n\t\tReturn an `App` object from this TAB. You must specify the desired\n\t\tMCU architecture so the correct binary can be retrieved.\n\t\t'
binary_tarinfo = self.tab.getmember('{}.bin'.format(arch))
binary = self.tab.extractfile(binary_tarinfo).read()
tbfh = TBFHeader(binary)
if tbfh.is_valid():
name_or_params = tbfh.get_app_name()
if isinstance(name_or_params, str):
name = name_or_params
else:
start = name_or_params[0]
end = (start + name_or_params[1])
name = binary[start:end].decode('utf-8')
if (tbfh.get_app_size() < len(binary)):
raise TockLoaderException('Invalid TAB, the app binary is longer than its defined total_size')
return App(tbfh, None, name, binary)
else:
raise TockLoaderException('Invalid TBF found in app in TAB') |
def is_compatible_with_board(self, board):
'\n\t\tCheck if the Tock app is compatible with a particular Tock board.\n\t\t'
metadata = self.parse_metadata()
if (metadata['tab-version'] == 1):
return (('only-for-boards' not in metadata) or (board in metadata['only-for-boards']) or (metadata['only-for-boards'] == ''))
else:
raise TockLoaderException('Unable to understand version {} of metadata'.format(metadata['tab-version'])) | -8,559,294,918,589,570,000 | Check if the Tock app is compatible with a particular Tock board. | tockloader/tab.py | is_compatible_with_board | torfmaster/tockloader | python | def is_compatible_with_board(self, board):
'\n\t\t\n\t\t'
metadata = self.parse_metadata()
if (metadata['tab-version'] == 1):
return (('only-for-boards' not in metadata) or (board in metadata['only-for-boards']) or (metadata['only-for-boards'] == ''))
else:
raise TockLoaderException('Unable to understand version {} of metadata'.format(metadata['tab-version'])) |
def parse_metadata(self):
'\n\t\tOpen and parse the included metadata file in the TAB.\n\t\t'
metadata_tarinfo = self.tab.getmember('metadata.toml')
metadata_str = self.tab.extractfile(metadata_tarinfo).read().decode('utf-8')
return pytoml.loads(metadata_str) | -3,623,499,767,396,370,000 | Open and parse the included metadata file in the TAB. | tockloader/tab.py | parse_metadata | torfmaster/tockloader | python | def parse_metadata(self):
'\n\t\t\n\t\t'
metadata_tarinfo = self.tab.getmember('metadata.toml')
metadata_str = self.tab.extractfile(metadata_tarinfo).read().decode('utf-8')
return pytoml.loads(metadata_str) |
def get_supported_architectures(self):
'\n\t\tReturn a list of architectures that this TAB has compiled binaries for.\n\t\t'
contained_files = self.tab.getnames()
return [i[:(- 4)] for i in contained_files if (i[(- 4):] == '.bin')] | -3,800,365,372,095,071,000 | Return a list of architectures that this TAB has compiled binaries for. | tockloader/tab.py | get_supported_architectures | torfmaster/tockloader | python | def get_supported_architectures(self):
'\n\t\t\n\t\t'
contained_files = self.tab.getnames()
return [i[:(- 4)] for i in contained_files if (i[(- 4):] == '.bin')] |
def get_tbf_header(self):
'\n\t\tReturn a TBFHeader object with the TBF header from the app in the TAB.\n\t\tTBF headers are not architecture specific, so we pull from a random\n\t\tbinary if there are multiple architectures supported.\n\t\t'
for f in self.tab.getnames():
if (f[(- 4):] == '.bin'):
binary_tarinfo = self.tab.getmember(f)
binary = self.tab.extractfile(binary_tarinfo).read()
return TBFHeader(binary)
return None | -604,436,729,304,759,000 | Return a TBFHeader object with the TBF header from the app in the TAB.
TBF headers are not architecture specific, so we pull from a random
binary if there are multiple architectures supported. | tockloader/tab.py | get_tbf_header | torfmaster/tockloader | python | def get_tbf_header(self):
'\n\t\tReturn a TBFHeader object with the TBF header from the app in the TAB.\n\t\tTBF headers are not architecture specific, so we pull from a random\n\t\tbinary if there are multiple architectures supported.\n\t\t'
for f in self.tab.getnames():
if (f[(- 4):] == '.bin'):
binary_tarinfo = self.tab.getmember(f)
binary = self.tab.extractfile(binary_tarinfo).read()
return TBFHeader(binary)
return None |
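The tockloader rows from extract_app through get_tbf_header all operate on an opened TAB archive. A hedged usage sketch follows; the class name TAB, its import path, its constructor argument, and the 'hail' board string are assumptions made for illustration, while the method names come from the rows above.

```python
# Hypothetical use of the TAB helpers shown above. The class name, import
# path, constructor argument, and board string are assumptions; only the
# method names are taken from the rows above.
from tockloader.tab import TAB          # assumed import path

tab = TAB('blink.tab')                  # assumed constructor signature
metadata = tab.parse_metadata()         # parsed metadata.toml as a dict
print(metadata.get('name'), tab.get_supported_architectures())

if tab.is_compatible_with_board('hail'):             # example board name
    arch = tab.get_supported_architectures()[0]
    app = tab.extract_app(arch)                       # App(tbfh, None, name, binary)
    print(tab.get_tbf_header().is_valid())            # TBF header sanity check
```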
def beta_create_ImageAnnotator_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
'The Beta API is deprecated for 0.15.0 and later.\n\n It is recommended to use the GA API (classes and functions in this\n file not marked beta) for all further purposes. This function was\n generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0'
request_deserializers = {('google.cloud.vision.v1p1beta1.ImageAnnotator', 'BatchAnnotateImages'): BatchAnnotateImagesRequest.FromString}
response_serializers = {('google.cloud.vision.v1p1beta1.ImageAnnotator', 'BatchAnnotateImages'): BatchAnnotateImagesResponse.SerializeToString}
method_implementations = {('google.cloud.vision.v1p1beta1.ImageAnnotator', 'BatchAnnotateImages'): face_utilities.unary_unary_inline(servicer.BatchAnnotateImages)}
server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
return beta_implementations.server(method_implementations, options=server_options) | -7,068,467,836,664,310,000 | The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This function was
generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0 | vision/google/cloud/vision_v1p1beta1/proto/image_annotator_pb2.py | beta_create_ImageAnnotator_server | Alexander-Minyushkin/google-cloud-python | python | def beta_create_ImageAnnotator_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
'The Beta API is deprecated for 0.15.0 and later.\n\n It is recommended to use the GA API (classes and functions in this\n file not marked beta) for all further purposes. This function was\n generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0'
request_deserializers = {('google.cloud.vision.v1p1beta1.ImageAnnotator', 'BatchAnnotateImages'): BatchAnnotateImagesRequest.FromString}
response_serializers = {('google.cloud.vision.v1p1beta1.ImageAnnotator', 'BatchAnnotateImages'): BatchAnnotateImagesResponse.SerializeToString}
method_implementations = {('google.cloud.vision.v1p1beta1.ImageAnnotator', 'BatchAnnotateImages'): face_utilities.unary_unary_inline(servicer.BatchAnnotateImages)}
server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
return beta_implementations.server(method_implementations, options=server_options) |
def beta_create_ImageAnnotator_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
'The Beta API is deprecated for 0.15.0 and later.\n\n It is recommended to use the GA API (classes and functions in this\n file not marked beta) for all further purposes. This function was\n generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0'
request_serializers = {('google.cloud.vision.v1p1beta1.ImageAnnotator', 'BatchAnnotateImages'): BatchAnnotateImagesRequest.SerializeToString}
response_deserializers = {('google.cloud.vision.v1p1beta1.ImageAnnotator', 'BatchAnnotateImages'): BatchAnnotateImagesResponse.FromString}
cardinalities = {'BatchAnnotateImages': cardinality.Cardinality.UNARY_UNARY}
stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
return beta_implementations.dynamic_stub(channel, 'google.cloud.vision.v1p1beta1.ImageAnnotator', cardinalities, options=stub_options) | 2,554,020,353,311,455,000 | The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This function was
generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0 | vision/google/cloud/vision_v1p1beta1/proto/image_annotator_pb2.py | beta_create_ImageAnnotator_stub | Alexander-Minyushkin/google-cloud-python | python | def beta_create_ImageAnnotator_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
'The Beta API is deprecated for 0.15.0 and later.\n\n It is recommended to use the GA API (classes and functions in this\n file not marked beta) for all further purposes. This function was\n generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0'
request_serializers = {('google.cloud.vision.v1p1beta1.ImageAnnotator', 'BatchAnnotateImages'): BatchAnnotateImagesRequest.SerializeToString}
response_deserializers = {('google.cloud.vision.v1p1beta1.ImageAnnotator', 'BatchAnnotateImages'): BatchAnnotateImagesResponse.FromString}
cardinalities = {'BatchAnnotateImages': cardinality.Cardinality.UNARY_UNARY}
stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
return beta_implementations.dynamic_stub(channel, 'google.cloud.vision.v1p1beta1.ImageAnnotator', cardinalities, options=stub_options) |
def __init__(self, channel):
'Constructor.\n\n Args:\n channel: A grpc.Channel.\n '
self.BatchAnnotateImages = channel.unary_unary('/google.cloud.vision.v1p1beta1.ImageAnnotator/BatchAnnotateImages', request_serializer=BatchAnnotateImagesRequest.SerializeToString, response_deserializer=BatchAnnotateImagesResponse.FromString) | 7,415,371,588,454,651,000 | Constructor.
Args:
channel: A grpc.Channel. | vision/google/cloud/vision_v1p1beta1/proto/image_annotator_pb2.py | __init__ | Alexander-Minyushkin/google-cloud-python | python | def __init__(self, channel):
'Constructor.\n\n Args:\n channel: A grpc.Channel.\n '
self.BatchAnnotateImages = channel.unary_unary('/google.cloud.vision.v1p1beta1.ImageAnnotator/BatchAnnotateImages', request_serializer=BatchAnnotateImagesRequest.SerializeToString, response_deserializer=BatchAnnotateImagesResponse.FromString) |
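The generated stub above registers BatchAnnotateImages as a unary-unary call on a gRPC channel. A minimal client sketch, assuming the enclosing stub class is named ImageAnnotatorStub (the usual codegen convention; only __init__ appears in the row) and that a compatible server is listening on a local port:

```python
# Hypothetical client usage for the generated stub above. The class name,
# module path, server address, and request contents are assumptions.
import grpc
from google.cloud.vision_v1p1beta1.proto import image_annotator_pb2  # assumed path

channel = grpc.insecure_channel('localhost:50051')          # assumed endpoint
stub = image_annotator_pb2.ImageAnnotatorStub(channel)      # assumed class name

request = image_annotator_pb2.BatchAnnotateImagesRequest()
# A real call would append AnnotateImageRequest entries to request.requests.
response = stub.BatchAnnotateImages(request, timeout=10.0)
print(response)
```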
def BatchAnnotateImages(self, request, context):
'Run image detection and annotation for a batch of images.\n '
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!') | 3,685,372,308,543,791,600 | Run image detection and annotation for a batch of images. | vision/google/cloud/vision_v1p1beta1/proto/image_annotator_pb2.py | BatchAnnotateImages | Alexander-Minyushkin/google-cloud-python | python | def BatchAnnotateImages(self, request, context):
'\n '
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!') |
def BatchAnnotateImages(self, request, context):
'Run image detection and annotation for a batch of images.\n '
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) | 823,033,015,200,478,100 | Run image detection and annotation for a batch of images. | vision/google/cloud/vision_v1p1beta1/proto/image_annotator_pb2.py | BatchAnnotateImages | Alexander-Minyushkin/google-cloud-python | python | def BatchAnnotateImages(self, request, context):
'\n '
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) |
def BatchAnnotateImages(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
'Run image detection and annotation for a batch of images.\n '
raise NotImplementedError() | -2,228,279,714,628,205,600 | Run image detection and annotation for a batch of images. | vision/google/cloud/vision_v1p1beta1/proto/image_annotator_pb2.py | BatchAnnotateImages | Alexander-Minyushkin/google-cloud-python | python | def BatchAnnotateImages(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
'\n '
raise NotImplementedError() |
@property
def V_max(self):
'[float] The designed maximum liquid volume, not accounting for increased volume due to aeration, in m^3.'
return self._V_max | -3,506,608,882,236,248,600 | [float] The designed maximum liquid volume, not accounting for increased volume due to aeration, in m^3. | qsdsan/sanunits/_suspended_growth_bioreactor.py | V_max | QSD-for-WaSH/sanitation | python | @property
def V_max(self):
return self._V_max |
@property
def aeration(self):
'[:class:`Process` or float or NoneType] Aeration model.'
return self._aeration | -4,594,160,389,946,629,000 | [:class:`Process` or float or NoneType] Aeration model. | qsdsan/sanunits/_suspended_growth_bioreactor.py | aeration | QSD-for-WaSH/sanitation | python | @property
def aeration(self):
return self._aeration |
@property
def suspended_growth_model(self):
'[:class:`CompiledProcesses` or NoneType] Suspended growth model.'
return self._model | -1,955,942,594,984,201,000 | [:class:`CompiledProcesses` or NoneType] Suspended growth model. | qsdsan/sanunits/_suspended_growth_bioreactor.py | suspended_growth_model | QSD-for-WaSH/sanitation | python | @property
def suspended_growth_model(self):
return self._model |
@property
def DO_ID(self):
'[str] The `Component` ID for dissolved oxygen used in the suspended growth model and the aeration model.'
return self._DO_ID | -6,349,589,402,538,691,000 | [str] The `Component` ID for dissolved oxygen used in the suspended growth model and the aeration model. | qsdsan/sanunits/_suspended_growth_bioreactor.py | DO_ID | QSD-for-WaSH/sanitation | python | @property
def DO_ID(self):
return self._DO_ID |
@property
def split(self):
'[numpy.1darray or NoneType] The volumetric split of outs.'
return self._split | 2,645,588,217,702,961,000 | [numpy.1darray or NoneType] The volumetric split of outs. | qsdsan/sanunits/_suspended_growth_bioreactor.py | split | QSD-for-WaSH/sanitation | python | @property
def split(self):
return self._split |
@property
def state(self):
'The state of the CSTR, including component concentrations [mg/L] and flow rate [m^3/d].'
if (self._state is None):
return None
else:
return dict(zip((list(self.components.IDs) + ['Q']), self._state)) | -5,897,520,368,430,410,000 | The state of the CSTR, including component concentrations [mg/L] and flow rate [m^3/d]. | qsdsan/sanunits/_suspended_growth_bioreactor.py | state | QSD-for-WaSH/sanitation | python | @property
def state(self):
if (self._state is None):
return None
else:
return dict(zip((list(self.components.IDs) + ['Q']), self._state)) |
def set_init_conc(self, **kwargs):
'set the initial concentrations [mg/L] of the CSTR.'
Cs = np.zeros(len(self.components))
cmpx = self.components.index
for (k, v) in kwargs.items():
Cs[cmpx(k)] = v
self._concs = Cs | 2,782,257,569,388,788,700 | set the initial concentrations [mg/L] of the CSTR. | qsdsan/sanunits/_suspended_growth_bioreactor.py | set_init_conc | QSD-for-WaSH/sanitation | python | def set_init_conc(self, **kwargs):
Cs = np.zeros(len(self.components))
cmpx = self.components.index
for (k, v) in kwargs.items():
Cs[cmpx(k)] = v
self._concs = Cs |
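set_init_conc above maps keyword component IDs onto positions in a concentration vector. A standalone sketch of that mapping, using an invented component set purely for illustration:

```python
# Standalone sketch of the keyword-to-vector mapping done by set_init_conc.
# The component IDs below are invented; the real set comes from the loaded
# qsdsan Components object.
import numpy as np

component_ids = ('S_S', 'S_O', 'X_BH', 'S_NH4')      # assumed component IDs

def set_init_conc(**kwargs):
    Cs = np.zeros(len(component_ids))
    for k, v in kwargs.items():
        Cs[component_ids.index(k)] = v                # mirrors the cmpx(k) lookup
    return Cs

print(set_init_conc(S_S=5.0, S_O=2.0, X_BH=500.0))    # [  5.   2. 500.   0.]
```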
def _run(self):
'Only to converge volumetric flows.'
mixed = self._mixed
mixed.mix_from(self.ins)
Q = mixed.F_vol
if (self.split is None):
self.outs[0].copy_like(mixed)
else:
for (ws, spl) in zip(self._outs, self.split):
ws.copy_like(mixed)
ws.set_total_flow((Q * spl), 'm3/hr') | 8,490,941,481,928,547,000 | Only to converge volumetric flows. | qsdsan/sanunits/_suspended_growth_bioreactor.py | _run | QSD-for-WaSH/sanitation | python | def _run(self):
mixed = self._mixed
mixed.mix_from(self.ins)
Q = mixed.F_vol
if (self.split is None):
self.outs[0].copy_like(mixed)
else:
for (ws, spl) in zip(self._outs, self.split):
ws.copy_like(mixed)
ws.set_total_flow((Q * spl), 'm3/hr') |
def docstring_values(values, indent=8):
'\n Formats a dictionary of values for inclusion in a docstring.\n '
return ('\n' + (' ' * indent)).join((("* ``'%s'``" % k) for (k, v) in sorted(values.items(), key=itemgetter(1)))) | 8,566,033,594,727,782,000 | Formats a dictionary of values for inclusion in a docstring. | picamera/camera.py | docstring_values | RobertLucian/picamera | python | def docstring_values(values, indent=8):
'\n \n '
return ('\n' + (' ' * indent)).join((("* ``'%s'``" % k) for (k, v) in sorted(values.items(), key=itemgetter(1)))) |
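docstring_values above turns a value-sorted dict into reST bullet lines for inclusion in picamera docstrings. A quick standalone check of the output shape, with made-up keys and values:

```python
# Reproducing docstring_values with an illustrative dict; the keys and
# values here are made up rather than picamera's real enumerations.
from operator import itemgetter

def docstring_values(values, indent=8):
    return ('\n' + ' ' * indent).join(
        "* ``'%s'``" % k
        for k, v in sorted(values.items(), key=itemgetter(1)))

print(docstring_values({'auto': 1, 'off': 0, 'night': 2}, indent=4))
# * ``'off'``
#     * ``'auto'``
#     * ``'night'``
```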
def _check_camera_open(self):
'\n Raise an exception if the camera is already closed, or if the camera\n has encountered a fatal error.\n '
(exc, self._camera_exception) = (self._camera_exception, None)
if exc:
raise exc
if self.closed:
raise PiCameraClosed('Camera is closed') | -7,220,415,521,090,356,000 | Raise an exception if the camera is already closed, or if the camera
has encountered a fatal error. | picamera/camera.py | _check_camera_open | RobertLucian/picamera | python | def _check_camera_open(self):
'\n Raise an exception if the camera is already closed, or if the camera\n has encountered a fatal error.\n '
(exc, self._camera_exception) = (self._camera_exception, None)
if exc:
raise exc
if self.closed:
raise PiCameraClosed('Camera is closed') |
def _check_recording_stopped(self):
'\n Raise an exception if the camera is currently recording.\n '
if self.recording:
raise PiCameraRuntimeError('Recording is currently running') | -6,894,281,195,436,221,000 | Raise an exception if the camera is currently recording. | picamera/camera.py | _check_recording_stopped | RobertLucian/picamera | python | def _check_recording_stopped(self):
'\n \n '
if self.recording:
raise PiCameraRuntimeError('Recording is currently running') |
def _get_ports(self, from_video_port, splitter_port):
"\n Determine the camera and output ports for given capture options.\n\n See :ref:`camera_hardware` for more information on picamera's usage of\n camera, splitter, and encoder ports. The general idea here is that the\n capture (still) port operates on its own, while the video port is\n always connected to a splitter component, so requests for a video port\n also have to specify which splitter port they want to use.\n "
self._check_camera_open()
if (from_video_port and (splitter_port in self._encoders)):
raise PiCameraAlreadyRecording(('The camera is already using port %d ' % splitter_port))
camera_port = (self._camera.outputs[self.CAMERA_VIDEO_PORT] if from_video_port else self._camera.outputs[self.CAMERA_CAPTURE_PORT])
output_port = (self._splitter.outputs[splitter_port] if from_video_port else camera_port)
return (camera_port, output_port) | 5,066,261,328,255,325,000 | Determine the camera and output ports for given capture options.
See :ref:`camera_hardware` for more information on picamera's usage of
camera, splitter, and encoder ports. The general idea here is that the
capture (still) port operates on its own, while the video port is
always connected to a splitter component, so requests for a video port
also have to specify which splitter port they want to use. | picamera/camera.py | _get_ports | RobertLucian/picamera | python | def _get_ports(self, from_video_port, splitter_port):
"\n Determine the camera and output ports for given capture options.\n\n See :ref:`camera_hardware` for more information on picamera's usage of\n camera, splitter, and encoder ports. The general idea here is that the\n capture (still) port operates on its own, while the video port is\n always connected to a splitter component, so requests for a video port\n also have to specify which splitter port they want to use.\n "
self._check_camera_open()
if (from_video_port and (splitter_port in self._encoders)):
raise PiCameraAlreadyRecording(('The camera is already using port %d ' % splitter_port))
camera_port = (self._camera.outputs[self.CAMERA_VIDEO_PORT] if from_video_port else self._camera.outputs[self.CAMERA_CAPTURE_PORT])
output_port = (self._splitter.outputs[splitter_port] if from_video_port else camera_port)
return (camera_port, output_port) |
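_get_ports above distinguishes still captures on the capture port from video-port captures routed through a splitter port. A hedged sketch of the public picamera calls that exercise both paths; the filenames, sleep duration, and splitter port numbers are arbitrary, and real camera hardware is required to run it:

```python
# Hypothetical picamera session touching both paths described above:
# a recording on splitter port 1 (video port), a still grabbed from
# splitter port 2, then a capture on the dedicated still port.
import time
import picamera

with picamera.PiCamera() as camera:
    camera.start_recording('clip.h264', splitter_port=1)
    time.sleep(2)
    camera.capture('frame.jpg', use_video_port=True, splitter_port=2)
    camera.stop_recording(splitter_port=1)
    camera.capture('still.jpg')          # uses the capture (still) port
```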
def _get_output_format(self, output):
'\n Given an output object, attempt to determine the requested format.\n\n We attempt to determine the filename of the *output* object and derive\n a MIME type from the extension. If *output* has no filename, an error\n is raised.\n '
if isinstance(output, bytes):
filename = output.decode('utf-8')
elif isinstance(output, str):
filename = output
else:
try:
filename = output.name
except AttributeError:
raise PiCameraValueError('Format must be specified when output has no filename')
(type, encoding) = mimetypes.guess_type(filename, strict=False)
if (not type):
raise PiCameraValueError(('Unable to determine type from filename %s' % filename))
return type | -5,423,043,728,053,178,000 | Given an output object, attempt to determine the requested format.
We attempt to determine the filename of the *output* object and derive
a MIME type from the extension. If *output* has no filename, an error
is raised. | picamera/camera.py | _get_output_format | RobertLucian/picamera | python | def _get_output_format(self, output):
'\n Given an output object, attempt to determine the requested format.\n\n We attempt to determine the filename of the *output* object and derive\n a MIME type from the extension. If *output* has no filename, an error\n is raised.\n '
if isinstance(output, bytes):
filename = output.decode('utf-8')
elif isinstance(output, str):
filename = output
else:
try:
filename = output.name
except AttributeError:
raise PiCameraValueError('Format must be specified when output has no filename')
(type, encoding) = mimetypes.guess_type(filename, strict=False)
if (not type):
raise PiCameraValueError(('Unable to determine type from filename %s' % filename))
return type |
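_get_output_format above derives a MIME type from the output's filename via mimetypes.guess_type. How that lookup behaves on its own, with example filenames:

```python
# How the filename-to-MIME-type step behaves, independent of picamera.
import mimetypes

for name in ('capture.jpg', 'capture.png', 'capture.bmp', 'noext'):
    print(name, mimetypes.guess_type(name, strict=False))
# capture.jpg -> ('image/jpeg', None)
# capture.png -> ('image/png', None)
# capture.bmp -> ('image/bmp', None) or ('image/x-ms-bmp', None) depending on
#                the Python version, which is why _get_image_format remaps it
# noext       -> (None, None), which triggers the PiCameraValueError branch above
```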
def _get_image_format(self, output, format=None):
'\n Given an output object and an optional format, attempt to determine the\n requested image format.\n\n This method is used by all capture methods to determine the requested\n output format. If *format* is specified as a MIME-type the "image/"\n prefix is stripped. If *format* is not specified, then\n :meth:`_get_output_format` will be called to attempt to determine\n format from the *output* object.\n '
if isinstance(format, bytes):
format = format.decode('utf-8')
format = (format or self._get_output_format(output))
format = (format[6:] if format.startswith('image/') else format)
if (format == 'x-ms-bmp'):
format = 'bmp'
if (format == 'raw'):
format = self.raw_format
return format | -5,771,226,095,761,120,000 | Given an output object and an optional format, attempt to determine the
requested image format.
This method is used by all capture methods to determine the requested
output format. If *format* is specified as a MIME-type the "image/"
prefix is stripped. If *format* is not specified, then
:meth:`_get_output_format` will be called to attempt to determine
format from the *output* object. | picamera/camera.py | _get_image_format | RobertLucian/picamera | python | def _get_image_format(self, output, format=None):
'\n Given an output object and an optional format, attempt to determine the\n requested image format.\n\n This method is used by all capture methods to determine the requested\n output format. If *format* is specified as a MIME-type the "image/"\n prefix is stripped. If *format* is not specified, then\n :meth:`_get_output_format` will be called to attempt to determine\n format from the *output* object.\n '
if isinstance(format, bytes):
format = format.decode('utf-8')
format = (format or self._get_output_format(output))
format = (format[6:] if format.startswith('image/') else format)
if (format == 'x-ms-bmp'):
format = 'bmp'
if (format == 'raw'):
format = self.raw_format
return format |
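_get_image_format above strips an 'image/' prefix and remaps the 'x-ms-bmp' special case. A standalone restatement of that normalization (the 'raw' branch is omitted because it depends on the camera object's raw_format attribute):

```python
# Standalone sketch of the format normalization described above.
def normalize_image_format(fmt):
    fmt = fmt[6:] if fmt.startswith('image/') else fmt
    return 'bmp' if fmt == 'x-ms-bmp' else fmt

for fmt in ('image/jpeg', 'image/x-ms-bmp', 'png', 'bgr'):
    print(fmt, '->', normalize_image_format(fmt))
# image/jpeg -> jpeg
# image/x-ms-bmp -> bmp
# png -> png
# bgr -> bgr
```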
def _get_video_format(self, output, format=None):
'\n Given an output object and an optional format, attempt to determine the\n requested video format.\n\n This method is used by all recording methods to determine the requested\n output format. If *format* is specified as a MIME-type the "video/" or\n "application/" prefix will be stripped. If *format* is not specified,\n then :meth:`_get_output_format` will be called to attempt to determine\n format from the *output* object.\n '
if isinstance(format, bytes):
format = format.decode('utf-8')
format = (format or self._get_output_format(output))
format = (format[6:] if format.startswith('video/') else (format[12:] if format.startswith('application/') else format))
return format | 2,566,541,926,593,424,400 | Given an output object and an optional format, attempt to determine the
requested video format.
This method is used by all recording methods to determine the requested
output format. If *format* is specified as a MIME-type the "video/" or
"application/" prefix will be stripped. If *format* is not specified,
then :meth:`_get_output_format` will be called to attempt to determine
format from the *output* object. | picamera/camera.py | _get_video_format | RobertLucian/picamera | python | def _get_video_format(self, output, format=None):
'\n Given an output object and an optional format, attempt to determine the\n requested video format.\n\n This method is used by all recording methods to determine the requested\n output format. If *format* is specified as a MIME-type the "video/" or\n "application/" prefix will be stripped. If *format* is not specified,\n then :meth:`_get_output_format` will be called to attempt to determine\n format from the *output* object.\n '
if isinstance(format, bytes):
format = format.decode('utf-8')
format = (format or self._get_output_format(output))
format = (format[6:] if format.startswith('video/') else (format[12:] if format.startswith('application/') else format))
return format |
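Because a stream output has no filename for these helpers to inspect, the format must be supplied explicitly in that case; a brief usage sketch (assuming a Pi with a camera module attached, and illustrative filenames):

import io
import picamera

stream = io.BytesIO()
with picamera.PiCamera() as camera:
    # A filename lets the format be guessed from its extension...
    camera.capture('snapshot.jpg')
    # ...but a BytesIO has no name, so the format must be given explicitly;
    # omitting it here would raise PiCameraValueError.
    camera.capture(stream, format='jpeg')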
def _get_image_encoder(self, camera_port, output_port, format, resize, **options):
"\n Construct an image encoder for the requested parameters.\n\n This method is called by :meth:`capture` and :meth:`capture_continuous`\n to construct an image encoder. The *camera_port* parameter gives the\n MMAL camera port that should be enabled for capture by the encoder. The\n *output_port* parameter gives the MMAL port that the encoder should\n read output from (this may be the same as the camera port, but may be\n different if other component(s) like a splitter have been placed in the\n pipeline). The *format* parameter indicates the image format and will\n be one of:\n\n * ``'jpeg'``\n * ``'png'``\n * ``'gif'``\n * ``'bmp'``\n * ``'yuv'``\n * ``'rgb'``\n * ``'rgba'``\n * ``'bgr'``\n * ``'bgra'``\n\n The *resize* parameter indicates the size that the encoder should\n resize the output to (presumably by including a resizer in the\n pipeline). Finally, *options* includes extra keyword arguments that\n should be passed verbatim to the encoder.\n "
encoder_class = (PiRawOneImageEncoder if (format in self.RAW_FORMATS) else PiCookedOneImageEncoder)
return encoder_class(self, camera_port, output_port, format, resize, **options) | -7,457,254,361,250,503,000 | Construct an image encoder for the requested parameters.
This method is called by :meth:`capture` and :meth:`capture_continuous`
to construct an image encoder. The *camera_port* parameter gives the
MMAL camera port that should be enabled for capture by the encoder. The
*output_port* parameter gives the MMAL port that the encoder should
read output from (this may be the same as the camera port, but may be
different if other component(s) like a splitter have been placed in the
pipeline). The *format* parameter indicates the image format and will
be one of:
* ``'jpeg'``
* ``'png'``
* ``'gif'``
* ``'bmp'``
* ``'yuv'``
* ``'rgb'``
* ``'rgba'``
* ``'bgr'``
* ``'bgra'``
The *resize* parameter indicates the size that the encoder should
resize the output to (presumably by including a resizer in the
pipeline). Finally, *options* includes extra keyword arguments that
should be passed verbatim to the encoder. | picamera/camera.py | _get_image_encoder | RobertLucian/picamera | python | def _get_image_encoder(self, camera_port, output_port, format, resize, **options):
"\n Construct an image encoder for the requested parameters.\n\n This method is called by :meth:`capture` and :meth:`capture_continuous`\n to construct an image encoder. The *camera_port* parameter gives the\n MMAL camera port that should be enabled for capture by the encoder. The\n *output_port* parameter gives the MMAL port that the encoder should\n read output from (this may be the same as the camera port, but may be\n different if other component(s) like a splitter have been placed in the\n pipeline). The *format* parameter indicates the image format and will\n be one of:\n\n * ``'jpeg'``\n * ``'png'``\n * ``'gif'``\n * ``'bmp'``\n * ``'yuv'``\n * ``'rgb'``\n * ``'rgba'``\n * ``'bgr'``\n * ``'bgra'``\n\n The *resize* parameter indicates the size that the encoder should\n resize the output to (presumably by including a resizer in the\n pipeline). Finally, *options* includes extra keyword arguments that\n should be passed verbatim to the encoder.\n "
encoder_class = (PiRawOneImageEncoder if (format in self.RAW_FORMATS) else PiCookedOneImageEncoder)
return encoder_class(self, camera_port, output_port, format, resize, **options) |
def _get_images_encoder(self, camera_port, output_port, format, resize, **options):
'\n Construct a multi-image encoder for the requested parameters.\n\n This method is largely equivalent to :meth:`_get_image_encoder` with\n the exception that the encoder returned should expect to be passed an\n iterable of outputs to its :meth:`~PiEncoder.start` method, rather than\n a single output object. This method is called by the\n :meth:`capture_sequence` method.\n\n All parameters are the same as in :meth:`_get_image_encoder`. Please\n refer to the documentation for that method for further information.\n '
encoder_class = (PiRawMultiImageEncoder if (format in self.RAW_FORMATS) else PiCookedMultiImageEncoder)
return encoder_class(self, camera_port, output_port, format, resize, **options) | -3,635,200,426,073,873,400 | Construct a multi-image encoder for the requested parameters.
This method is largely equivalent to :meth:`_get_image_encoder` with
the exception that the encoder returned should expect to be passed an
iterable of outputs to its :meth:`~PiEncoder.start` method, rather than
a single output object. This method is called by the
:meth:`capture_sequence` method.
All parameters are the same as in :meth:`_get_image_encoder`. Please
refer to the documentation for that method for further information. | picamera/camera.py | _get_images_encoder | RobertLucian/picamera | python | def _get_images_encoder(self, camera_port, output_port, format, resize, **options):
'\n Construct a multi-image encoder for the requested parameters.\n\n This method is largely equivalent to :meth:`_get_image_encoder` with\n the exception that the encoder returned should expect to be passed an\n iterable of outputs to its :meth:`~PiEncoder.start` method, rather than\n a single output object. This method is called by the\n :meth:`capture_sequence` method.\n\n All parameters are the same as in :meth:`_get_image_encoder`. Please\n refer to the documentation for that method for further information.\n '
encoder_class = (PiRawMultiImageEncoder if (format in self.RAW_FORMATS) else PiCookedMultiImageEncoder)
return encoder_class(self, camera_port, output_port, format, resize, **options) |
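The multi-image encoder above is what backs capture_sequence, which takes an iterable of outputs rather than a single one; a short sketch of that call (again assuming an attached camera):

import picamera

with picamera.PiCamera(resolution=(1024, 768)) as camera:
    # Five numbered JPEGs captured in one call; a single multi-image encoder
    # is constructed by _get_images_encoder and reused for every output.
    camera.capture_sequence(['image%02d.jpg' % i for i in range(5)],
                            format='jpeg')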
def _get_video_encoder(self, camera_port, output_port, format, resize, **options):
"\n Construct a video encoder for the requested parameters.\n\n This method is called by :meth:`start_recording` and\n :meth:`record_sequence` to construct a video encoder. The\n *camera_port* parameter gives the MMAL camera port that should be\n enabled for capture by the encoder. The *output_port* parameter gives\n the MMAL port that the encoder should read output from (this may be the\n same as the camera port, but may be different if other component(s)\n like a splitter have been placed in the pipeline). The *format*\n parameter indicates the video format and will be one of:\n\n * ``'h264'``\n * ``'mjpeg'``\n\n The *resize* parameter indicates the size that the encoder should\n resize the output to (presumably by including a resizer in the\n pipeline). Finally, *options* includes extra keyword arguments that\n should be passed verbatim to the encoder.\n "
encoder_class = (PiRawVideoEncoder if (format in self.RAW_FORMATS) else PiCookedVideoEncoder)
return encoder_class(self, camera_port, output_port, format, resize, **options) | 6,569,485,216,671,746,000 | Construct a video encoder for the requested parameters.
This method is called by :meth:`start_recording` and
:meth:`record_sequence` to construct a video encoder. The
*camera_port* parameter gives the MMAL camera port that should be
enabled for capture by the encoder. The *output_port* parameter gives
the MMAL port that the encoder should read output from (this may be the
same as the camera port, but may be different if other component(s)
like a splitter have been placed in the pipeline). The *format*
parameter indicates the video format and will be one of:
* ``'h264'``
* ``'mjpeg'``
The *resize* parameter indicates the size that the encoder should
resize the output to (presumably by including a resizer in the
pipeline). Finally, *options* includes extra keyword arguments that
should be passed verbatim to the encoder. | picamera/camera.py | _get_video_encoder | RobertLucian/picamera | python | def _get_video_encoder(self, camera_port, output_port, format, resize, **options):
"\n Construct a video encoder for the requested parameters.\n\n This method is called by :meth:`start_recording` and\n :meth:`record_sequence` to construct a video encoder. The\n *camera_port* parameter gives the MMAL camera port that should be\n enabled for capture by the encoder. The *output_port* parameter gives\n the MMAL port that the encoder should read output from (this may be the\n same as the camera port, but may be different if other component(s)\n like a splitter have been placed in the pipeline). The *format*\n parameter indicates the video format and will be one of:\n\n * ``'h264'``\n * ``'mjpeg'``\n\n The *resize* parameter indicates the size that the encoder should\n resize the output to (presumably by including a resizer in the\n pipeline). Finally, *options* includes extra keyword arguments that\n should be passed verbatim to the encoder.\n "
encoder_class = (PiRawVideoEncoder if (format in self.RAW_FORMATS) else PiCookedVideoEncoder)
return encoder_class(self, camera_port, output_port, format, resize, **options) |
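These factory methods are also the natural hook for substituting custom encoder classes; a hedged sketch of that pattern follows (MyH264Encoder and MyCamera are hypothetical names, and a real subclass would typically override one of the encoder callbacks rather than pass):

import picamera
import picamera.encoders

class MyH264Encoder(picamera.encoders.PiCookedVideoEncoder):
    # Hypothetical subclass; real code might override _callback_write to
    # inspect each chunk of encoded output before it is written.
    pass

class MyCamera(picamera.PiCamera):
    def _get_video_encoder(self, camera_port, output_port, format,
                           resize, **options):
        # Use the custom encoder for H.264 and defer to the default otherwise.
        if format == 'h264':
            return MyH264Encoder(self, camera_port, output_port, format,
                                 resize, **options)
        return super(MyCamera, self)._get_video_encoder(
            camera_port, output_port, format, resize, **options)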
def close(self):
'\n Finalizes the state of the camera.\n\n After successfully constructing a :class:`PiCamera` object, you should\n ensure you call the :meth:`close` method once you are finished with the\n camera (e.g. in the ``finally`` section of a ``try..finally`` block).\n This method stops all recording and preview activities and releases all\n resources associated with the camera; this is necessary to prevent GPU\n memory leaks.\n '
for port in list(self._encoders):
self.stop_recording(splitter_port=port)
assert (not self.recording)
for overlay in list(self._overlays):
self.remove_overlay(overlay)
if self._preview:
self._preview.close()
self._preview = None
if self._splitter:
self._splitter.close()
self._splitter = None
if self._camera:
self._camera.close()
self._camera = None
(exc, self._camera_exception) = (self._camera_exception, None)
if exc:
raise exc | -1,952,082,261,474,689,300 | Finalizes the state of the camera.
After successfully constructing a :class:`PiCamera` object, you should
ensure you call the :meth:`close` method once you are finished with the
camera (e.g. in the ``finally`` section of a ``try..finally`` block).
This method stops all recording and preview activities and releases all
resources associated with the camera; this is necessary to prevent GPU
memory leaks. | picamera/camera.py | close | RobertLucian/picamera | python | def close(self):
'\n Finalizes the state of the camera.\n\n After successfully constructing a :class:`PiCamera` object, you should\n ensure you call the :meth:`close` method once you are finished with the\n camera (e.g. in the ``finally`` section of a ``try..finally`` block).\n This method stops all recording and preview activities and releases all\n resources associated with the camera; this is necessary to prevent GPU\n memory leaks.\n '
for port in list(self._encoders):
self.stop_recording(splitter_port=port)
assert (not self.recording)
for overlay in list(self._overlays):
self.remove_overlay(overlay)
if self._preview:
self._preview.close()
self._preview = None
if self._splitter:
self._splitter.close()
self._splitter = None
if self._camera:
self._camera.close()
self._camera = None
(exc, self._camera_exception) = (self._camera_exception, None)
if exc:
raise exc |
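In practice the close call is usually guaranteed with try/finally, or equivalently by using the camera as a context manager; a brief sketch (filenames are illustrative):

import picamera

# Explicit form
camera = picamera.PiCamera()
try:
    camera.capture('foo.jpg')
finally:
    camera.close()

# Context-manager form: close() is called on exit, even if an error occurs.
with picamera.PiCamera() as camera:
    camera.capture('bar.jpg')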
def start_preview(self, **options):
'\n Displays the preview overlay.\n\n This method starts a camera preview as an overlay on the Pi\'s primary\n display (HDMI or composite). A :class:`PiRenderer` instance (more\n specifically, a :class:`PiPreviewRenderer`) is constructed with the\n keyword arguments captured in *options*, and is returned from the\n method (this instance is also accessible from the :attr:`preview`\n attribute for as long as the renderer remains active). By default, the\n renderer will be opaque and fullscreen.\n\n This means the default preview overrides whatever is currently visible\n on the display. More specifically, the preview does not rely on a\n graphical environment like X-Windows (it can run quite happily from a\n TTY console); it is simply an overlay on the Pi\'s video output. To stop\n the preview and reveal the display again, call :meth:`stop_preview`.\n The preview can be started and stopped multiple times during the\n lifetime of the :class:`PiCamera` object.\n\n All other camera properties can be modified "live" while the preview is\n running (e.g. :attr:`brightness`).\n\n .. note::\n\n Because the default preview typically obscures the screen, ensure\n you have a means of stopping a preview before starting one. If the\n preview obscures your interactive console you won\'t be able to\n Alt+Tab back to it as the preview isn\'t in a window. If you are in\n an interactive Python session, simply pressing Ctrl+D usually\n suffices to terminate the environment, including the camera and its\n associated preview.\n '
self._check_camera_open()
self._preview.close()
options.setdefault('layer', self._preview_layer)
options.setdefault('alpha', self._preview_alpha)
options.setdefault('fullscreen', self._preview_fullscreen)
options.setdefault('window', self._preview_window)
renderer = PiPreviewRenderer(self, self._camera.outputs[self.CAMERA_PREVIEW_PORT], **options)
self._preview = renderer
return renderer | 5,153,514,928,868,468,000 | Displays the preview overlay.
This method starts a camera preview as an overlay on the Pi's primary
display (HDMI or composite). A :class:`PiRenderer` instance (more
specifically, a :class:`PiPreviewRenderer`) is constructed with the
keyword arguments captured in *options*, and is returned from the
method (this instance is also accessible from the :attr:`preview`
attribute for as long as the renderer remains active). By default, the
renderer will be opaque and fullscreen.
This means the default preview overrides whatever is currently visible
on the display. More specifically, the preview does not rely on a
graphical environment like X-Windows (it can run quite happily from a
TTY console); it is simply an overlay on the Pi's video output. To stop
the preview and reveal the display again, call :meth:`stop_preview`.
The preview can be started and stopped multiple times during the
lifetime of the :class:`PiCamera` object.
All other camera properties can be modified "live" while the preview is
running (e.g. :attr:`brightness`).
.. note::
Because the default preview typically obscures the screen, ensure
you have a means of stopping a preview before starting one. If the
preview obscures your interactive console you won't be able to
Alt+Tab back to it as the preview isn't in a window. If you are in
an interactive Python session, simply pressing Ctrl+D usually
suffices to terminate the environment, including the camera and its
associated preview. | picamera/camera.py | start_preview | RobertLucian/picamera | python | def start_preview(self, **options):
'\n Displays the preview overlay.\n\n This method starts a camera preview as an overlay on the Pi\'s primary\n display (HDMI or composite). A :class:`PiRenderer` instance (more\n specifically, a :class:`PiPreviewRenderer`) is constructed with the\n keyword arguments captured in *options*, and is returned from the\n method (this instance is also accessible from the :attr:`preview`\n attribute for as long as the renderer remains active). By default, the\n renderer will be opaque and fullscreen.\n\n This means the default preview overrides whatever is currently visible\n on the display. More specifically, the preview does not rely on a\n graphical environment like X-Windows (it can run quite happily from a\n TTY console); it is simply an overlay on the Pi\'s video output. To stop\n the preview and reveal the display again, call :meth:`stop_preview`.\n The preview can be started and stopped multiple times during the\n lifetime of the :class:`PiCamera` object.\n\n All other camera properties can be modified "live" while the preview is\n running (e.g. :attr:`brightness`).\n\n .. note::\n\n Because the default preview typically obscures the screen, ensure\n you have a means of stopping a preview before starting one. If the\n preview obscures your interactive console you won\'t be able to\n Alt+Tab back to it as the preview isn\'t in a window. If you are in\n an interactive Python session, simply pressing Ctrl+D usually\n suffices to terminate the environment, including the camera and its\n associated preview.\n '
self._check_camera_open()
self._preview.close()
options.setdefault('layer', self._preview_layer)
options.setdefault('alpha', self._preview_alpha)
options.setdefault('fullscreen', self._preview_fullscreen)
options.setdefault('window', self._preview_window)
renderer = PiPreviewRenderer(self, self._camera.outputs[self.CAMERA_PREVIEW_PORT], **options)
self._preview = renderer
return renderer |
def stop_preview(self):
'\n Hides the preview overlay.\n\n If :meth:`start_preview` has previously been called, this method shuts\n down the preview display which generally results in the underlying\n display becoming visible again. If a preview is not currently running,\n no exception is raised - the method will simply do nothing.\n '
self._check_camera_open()
self._preview.close()
self._preview = PiNullSink(self, self._camera.outputs[self.CAMERA_PREVIEW_PORT]) | 3,200,172,221,825,403,400 | Hides the preview overlay.
If :meth:`start_preview` has previously been called, this method shuts
down the preview display which generally results in the underlying
display becoming visible again. If a preview is not currently running,
no exception is raised - the method will simply do nothing. | picamera/camera.py | stop_preview | RobertLucian/picamera | python | def stop_preview(self):
'\n Hides the preview overlay.\n\n If :meth:`start_preview` has previously been called, this method shuts\n down the preview display which generally results in the underlying\n display becoming visible again. If a preview is not currently running,\n no exception is raised - the method will simply do nothing.\n '
self._check_camera_open()
self._preview.close()
self._preview = PiNullSink(self, self._camera.outputs[self.CAMERA_PREVIEW_PORT]) |
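A short usage sketch tying the two preview methods together (assuming the Pi is driving a display):

import time
import picamera

with picamera.PiCamera() as camera:
    # Semi-transparent preview so an interactive console stays partly visible.
    camera.start_preview(alpha=128)
    time.sleep(10)          # let the scene be inspected and exposure settle
    camera.stop_preview()   # restore the normal display output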
def add_overlay(self, source, size=None, format=None, **options):
'\n Adds a static overlay to the preview output.\n\n This method creates a new static overlay using the same rendering\n mechanism as the preview. Overlays will appear on the Pi\'s video\n output, but will not appear in captures or video recordings. Multiple\n overlays can exist; each call to :meth:`add_overlay` returns a new\n :class:`PiOverlayRenderer` instance representing the overlay.\n\n The *source* must be an object that supports the :ref:`buffer protocol\n <bufferobjects>` in one of the supported unencoded formats: ``\'yuv\'``,\n ``\'rgb\'``, ``\'rgba\'``, ``\'bgr\'``, or ``\'bgra\'``. The format can\n specified explicitly with the optional *format* parameter. If not\n specified, the method will attempt to guess the format based on the\n length of *source* and the *size* (assuming 3 bytes per pixel for RGB,\n and 4 bytes for RGBA).\n\n The optional *size* parameter specifies the size of the source image as\n a ``(width, height)`` tuple. If this is omitted or ``None`` then the\n size is assumed to be the same as the camera\'s current\n :attr:`resolution`.\n\n The length of *source* must take into account that widths are rounded\n up to the nearest multiple of 32, and heights to the nearest multiple\n of 16. For example, if *size* is ``(1280, 720)``, and *format* is\n ``\'rgb\'``, then *source* must be a buffer with length 1280 × 720 × 3\n bytes, or 2,764,800 bytes (because 1280 is a multiple of 32, and 720 is\n a multiple of 16 no extra rounding is required). However, if *size* is\n ``(97, 57)``, and *format* is ``\'rgb\'`` then *source* must be a buffer\n with length 128 × 64 × 3 bytes, or 24,576 bytes (pixels beyond column\n 97 and row 57 in the source will be ignored).\n\n New overlays default to *layer* 0, whilst the preview defaults to layer\n 2. Higher numbered layers obscure lower numbered layers, hence new\n overlays will be invisible (if the preview is running) by default. You\n can make the new overlay visible either by making any existing preview\n transparent (with the :attr:`~PiRenderer.alpha` property) or by moving\n the overlay into a layer higher than the preview (with the\n :attr:`~PiRenderer.layer` property).\n\n All keyword arguments captured in *options* are passed onto the\n :class:`PiRenderer` constructor. All camera properties except\n :attr:`resolution` and :attr:`framerate` can be modified while overlays\n exist. The reason for these exceptions is that the overlay has a static\n resolution and changing the camera\'s mode would require resizing of the\n source.\n\n .. warning::\n\n If too many overlays are added, the display output will be disabled\n and a reboot will generally be required to restore the display.\n Overlays are composited "on the fly". Hence, a real-time constraint\n exists wherein for each horizontal line of HDMI output, the content\n of all source layers must be fetched, resized, converted, and\n blended to produce the output pixels.\n\n If enough overlays exist (where "enough" is a number dependent on\n overlay size, display resolution, bus frequency, and several other\n factors making it unrealistic to calculate in advance), this\n process breaks down and video output fails. One solution is to add\n ``dispmanx_offline=1`` to ``/boot/config.txt`` to force the use of\n an off-screen buffer. Be aware that this requires more GPU memory\n and may reduce the update rate.\n\n .. _RGB: https://en.wikipedia.org/wiki/RGB\n .. _RGBA: https://en.wikipedia.org/wiki/RGBA_color_space\n\n .. versionadded:: 1.8\n\n .. 
versionchanged:: 1.13\n Added *format* parameter\n '
self._check_camera_open()
renderer = PiOverlayRenderer(self, source, size, format, **options)
self._overlays.append(renderer)
return renderer | -4,768,258,809,790,032,000 | Adds a static overlay to the preview output.
This method creates a new static overlay using the same rendering
mechanism as the preview. Overlays will appear on the Pi's video
output, but will not appear in captures or video recordings. Multiple
overlays can exist; each call to :meth:`add_overlay` returns a new
:class:`PiOverlayRenderer` instance representing the overlay.
The *source* must be an object that supports the :ref:`buffer protocol
<bufferobjects>` in one of the supported unencoded formats: ``'yuv'``,
``'rgb'``, ``'rgba'``, ``'bgr'``, or ``'bgra'``. The format can
specified explicitly with the optional *format* parameter. If not
specified, the method will attempt to guess the format based on the
length of *source* and the *size* (assuming 3 bytes per pixel for RGB,
and 4 bytes for RGBA).
The optional *size* parameter specifies the size of the source image as
a ``(width, height)`` tuple. If this is omitted or ``None`` then the
size is assumed to be the same as the camera's current
:attr:`resolution`.
The length of *source* must take into account that widths are rounded
up to the nearest multiple of 32, and heights to the nearest multiple
of 16. For example, if *size* is ``(1280, 720)``, and *format* is
``'rgb'``, then *source* must be a buffer with length 1280 × 720 × 3
bytes, or 2,764,800 bytes (because 1280 is a multiple of 32, and 720 is
a multiple of 16 no extra rounding is required). However, if *size* is
``(97, 57)``, and *format* is ``'rgb'`` then *source* must be a buffer
with length 128 × 64 × 3 bytes, or 24,576 bytes (pixels beyond column
97 and row 57 in the source will be ignored).
New overlays default to *layer* 0, whilst the preview defaults to layer
2. Higher numbered layers obscure lower numbered layers, hence new
overlays will be invisible (if the preview is running) by default. You
can make the new overlay visible either by making any existing preview
transparent (with the :attr:`~PiRenderer.alpha` property) or by moving
the overlay into a layer higher than the preview (with the
:attr:`~PiRenderer.layer` property).
All keyword arguments captured in *options* are passed onto the
:class:`PiRenderer` constructor. All camera properties except
:attr:`resolution` and :attr:`framerate` can be modified while overlays
exist. The reason for these exceptions is that the overlay has a static
resolution and changing the camera's mode would require resizing of the
source.
.. warning::
If too many overlays are added, the display output will be disabled
and a reboot will generally be required to restore the display.
Overlays are composited "on the fly". Hence, a real-time constraint
exists wherein for each horizontal line of HDMI output, the content
of all source layers must be fetched, resized, converted, and
blended to produce the output pixels.
If enough overlays exist (where "enough" is a number dependent on
overlay size, display resolution, bus frequency, and several other
factors making it unrealistic to calculate in advance), this
process breaks down and video output fails. One solution is to add
``dispmanx_offline=1`` to ``/boot/config.txt`` to force the use of
an off-screen buffer. Be aware that this requires more GPU memory
and may reduce the update rate.
.. _RGB: https://en.wikipedia.org/wiki/RGB
.. _RGBA: https://en.wikipedia.org/wiki/RGBA_color_space
.. versionadded:: 1.8
.. versionchanged:: 1.13
Added *format* parameter | picamera/camera.py | add_overlay | RobertLucian/picamera | python | def add_overlay(self, source, size=None, format=None, **options):
'\n Adds a static overlay to the preview output.\n\n This method creates a new static overlay using the same rendering\n mechanism as the preview. Overlays will appear on the Pi\'s video\n output, but will not appear in captures or video recordings. Multiple\n overlays can exist; each call to :meth:`add_overlay` returns a new\n :class:`PiOverlayRenderer` instance representing the overlay.\n\n The *source* must be an object that supports the :ref:`buffer protocol\n <bufferobjects>` in one of the supported unencoded formats: ``\'yuv\'``,\n ``\'rgb\'``, ``\'rgba\'``, ``\'bgr\'``, or ``\'bgra\'``. The format can\n specified explicitly with the optional *format* parameter. If not\n specified, the method will attempt to guess the format based on the\n length of *source* and the *size* (assuming 3 bytes per pixel for RGB,\n and 4 bytes for RGBA).\n\n The optional *size* parameter specifies the size of the source image as\n a ``(width, height)`` tuple. If this is omitted or ``None`` then the\n size is assumed to be the same as the camera\'s current\n :attr:`resolution`.\n\n The length of *source* must take into account that widths are rounded\n up to the nearest multiple of 32, and heights to the nearest multiple\n of 16. For example, if *size* is ``(1280, 720)``, and *format* is\n ``\'rgb\'``, then *source* must be a buffer with length 1280 × 720 × 3\n bytes, or 2,764,800 bytes (because 1280 is a multiple of 32, and 720 is\n a multiple of 16 no extra rounding is required). However, if *size* is\n ``(97, 57)``, and *format* is ``\'rgb\'`` then *source* must be a buffer\n with length 128 × 64 × 3 bytes, or 24,576 bytes (pixels beyond column\n 97 and row 57 in the source will be ignored).\n\n New overlays default to *layer* 0, whilst the preview defaults to layer\n 2. Higher numbered layers obscure lower numbered layers, hence new\n overlays will be invisible (if the preview is running) by default. You\n can make the new overlay visible either by making any existing preview\n transparent (with the :attr:`~PiRenderer.alpha` property) or by moving\n the overlay into a layer higher than the preview (with the\n :attr:`~PiRenderer.layer` property).\n\n All keyword arguments captured in *options* are passed onto the\n :class:`PiRenderer` constructor. All camera properties except\n :attr:`resolution` and :attr:`framerate` can be modified while overlays\n exist. The reason for these exceptions is that the overlay has a static\n resolution and changing the camera\'s mode would require resizing of the\n source.\n\n .. warning::\n\n If too many overlays are added, the display output will be disabled\n and a reboot will generally be required to restore the display.\n Overlays are composited "on the fly". Hence, a real-time constraint\n exists wherein for each horizontal line of HDMI output, the content\n of all source layers must be fetched, resized, converted, and\n blended to produce the output pixels.\n\n If enough overlays exist (where "enough" is a number dependent on\n overlay size, display resolution, bus frequency, and several other\n factors making it unrealistic to calculate in advance), this\n process breaks down and video output fails. One solution is to add\n ``dispmanx_offline=1`` to ``/boot/config.txt`` to force the use of\n an off-screen buffer. Be aware that this requires more GPU memory\n and may reduce the update rate.\n\n .. _RGB: https://en.wikipedia.org/wiki/RGB\n .. _RGBA: https://en.wikipedia.org/wiki/RGBA_color_space\n\n .. versionadded:: 1.8\n\n .. 
versionchanged:: 1.13\n Added *format* parameter\n '
self._check_camera_open()
renderer = PiOverlayRenderer(self, source, size, format, **options)
self._overlays.append(renderer)
return renderer |
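The width/height rounding rules above are the usual stumbling block; a hedged sketch using numpy (not required by picamera, merely convenient here) to build a correctly padded RGB buffer for a 97x57 overlay:

import time
import numpy as np
import picamera

with picamera.PiCamera(resolution=(1280, 720)) as camera:
    camera.start_preview()
    width, height = 97, 57
    # Pad the buffer to 128x64: width up to a multiple of 32, height to 16.
    pad_w = ((width + 31) // 32) * 32
    pad_h = ((height + 15) // 16) * 16
    buf = np.zeros((pad_h, pad_w, 3), dtype=np.uint8)
    buf[:height, :width, 0] = 255   # solid red in the visible 97x57 region
    overlay = camera.add_overlay(buf.tobytes(), size=(width, height),
                                 format='rgb', layer=3, alpha=128)
    time.sleep(10)
    camera.remove_overlay(overlay)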
def remove_overlay(self, overlay):
'\n Removes a static overlay from the preview output.\n\n This method removes an overlay which was previously created by\n :meth:`add_overlay`. The *overlay* parameter specifies the\n :class:`PiRenderer` instance that was returned by :meth:`add_overlay`.\n\n .. versionadded:: 1.8\n '
if (not (overlay in self._overlays)):
raise PiCameraValueError('The specified overlay is not owned by this instance of PiCamera')
overlay.close()
self._overlays.remove(overlay) | -4,147,339,455,242,650,600 | Removes a static overlay from the preview output.
This method removes an overlay which was previously created by
:meth:`add_overlay`. The *overlay* parameter specifies the
:class:`PiRenderer` instance that was returned by :meth:`add_overlay`.
.. versionadded:: 1.8 | picamera/camera.py | remove_overlay | RobertLucian/picamera | python | def remove_overlay(self, overlay):
'\n Removes a static overlay from the preview output.\n\n This method removes an overlay which was previously created by\n :meth:`add_overlay`. The *overlay* parameter specifies the\n :class:`PiRenderer` instance that was returned by :meth:`add_overlay`.\n\n .. versionadded:: 1.8\n '
if (not (overlay in self._overlays)):
raise PiCameraValueError('The specified overlay is not owned by this instance of PiCamera')
overlay.close()
self._overlays.remove(overlay) |
def start_recording(self, output, format=None, resize=None, splitter_port=1, **options):
'\n Start recording video from the camera, storing it in *output*.\n\n If *output* is a string, it will be treated as a filename for a new\n file which the video will be written to. If *output* is not a string,\n but is an object with a ``write`` method, it is assumed to be a\n file-like object and the video data is appended to it (the\n implementation only assumes the object has a ``write()`` method - no\n other methods are required but ``flush`` will be called at the end of\n recording if it is present). If *output* is not a string, and has no\n ``write`` method it is assumed to be a writeable object implementing\n the buffer protocol. In this case, the video frames will be written\n sequentially to the underlying buffer (which must be large enough to\n accept all frame data).\n\n If *format* is ``None`` (the default), the method will attempt to guess\n the required video format from the extension of *output* (if it\'s a\n string), or from the *name* attribute of *output* (if it has one). In\n the case that the format cannot be determined, a\n :exc:`PiCameraValueError` will be raised.\n\n If *format* is not ``None``, it must be a string specifying the format\n that you want the video output in. The format can be a MIME-type or\n one of the following strings:\n\n * ``\'h264\'`` - Write an H.264 video stream\n * ``\'mjpeg\'`` - Write an M-JPEG video stream\n * ``\'yuv\'`` - Write the raw video data to a file in YUV420 format\n * ``\'rgb\'`` - Write the raw video data to a file in 24-bit RGB format\n * ``\'rgba\'`` - Write the raw video data to a file in 32-bit RGBA format\n * ``\'bgr\'`` - Write the raw video data to a file in 24-bit BGR format\n * ``\'bgra\'`` - Write the raw video data to a file in 32-bit BGRA format\n\n If *resize* is not ``None`` (the default), it must be a two-element\n tuple specifying the width and height that the video recording should\n be resized to. This is particularly useful for recording video using\n the full resolution of the camera sensor (which is not possible in\n H.264 without down-sizing the output).\n\n The *splitter_port* parameter specifies the port of the built-in\n splitter that the video encoder will be attached to. This defaults to\n ``1`` and most users will have no need to specify anything different.\n If you wish to record multiple (presumably resized) streams\n simultaneously, specify a value between ``0`` and ``3`` inclusive for\n this parameter, ensuring that you do not specify a port that is\n currently in use.\n\n Certain formats accept additional options which can be specified\n as keyword arguments. The ``\'h264\'`` format accepts the following\n additional options:\n\n * *profile* - The H.264 profile to use for encoding. Defaults to\n \'high\', but can be one of \'baseline\', \'main\', \'extended\', \'high\', or\n \'constrained\'.\n\n * *level* - The `H.264 level`_ to use for encoding. Defaults to \'4\',\n but can be any H.264 level up to \'4.2\'.\n\n * *intra_period* - The key frame rate (the rate at which I-frames are\n inserted in the output). Defaults to ``None``, but can be any 32-bit\n integer value representing the number of frames between successive\n I-frames. The special value 0 causes the encoder to produce a single\n initial I-frame, and then only P-frames subsequently. Note that\n :meth:`split_recording` will fail in this mode.\n\n * *intra_refresh* - The key frame format (the way in which I-frames\n will be inserted into the output stream). 
Defaults to ``None``, but\n can be one of \'cyclic\', \'adaptive\', \'both\', or \'cyclicrows\'.\n\n * *inline_headers* - When ``True``, specifies that the encoder should\n output SPS/PPS headers within the stream to ensure GOPs (groups of\n pictures) are self describing. This is important for streaming\n applications where the client may wish to seek within the stream, and\n enables the use of :meth:`split_recording`. Defaults to ``True`` if\n not specified.\n\n * *sei* - When ``True``, specifies the encoder should include\n "Supplemental Enhancement Information" within the output stream.\n Defaults to ``False`` if not specified.\n\n * *sps_timing* - When ``True`` the encoder includes the camera\'s\n framerate in the SPS header. Defaults to ``False`` if not specified.\n\n * *motion_output* - Indicates the output destination for motion vector\n estimation data. When ``None`` (the default), motion data is not\n output. Otherwise, this can be a filename string, a file-like object,\n or a writeable buffer object (as with the *output* parameter).\n\n All encoded formats accept the following additional options:\n\n * *bitrate* - The bitrate at which video will be encoded. Defaults to\n 17000000 (17Mbps) if not specified. The maximum value depends on the\n selected `H.264 level`_ and profile. Bitrate 0 indicates the encoder\n should not use bitrate control (the encoder is limited by the quality\n only).\n\n * *quality* - Specifies the quality that the encoder should attempt\n to maintain. For the ``\'h264\'`` format, use values between 10 and 40\n where 10 is extremely high quality, and 40 is extremely low (20-25 is\n usually a reasonable range for H.264 encoding). For the ``mjpeg``\n format, use JPEG quality values between 1 and 100 (where higher\n values are higher quality). Quality 0 is special and seems to be\n a "reasonable quality" default.\n\n * *quantization* - Deprecated alias for *quality*.\n\n .. versionchanged:: 1.0\n The *resize* parameter was added, and ``\'mjpeg\'`` was added as a\n recording format\n\n .. versionchanged:: 1.3\n The *splitter_port* parameter was added\n\n .. versionchanged:: 1.5\n The *quantization* parameter was deprecated in favor of *quality*,\n and the *motion_output* parameter was added.\n\n .. versionchanged:: 1.11\n Support for buffer outputs was added.\n\n .. _H.264 level: https://en.wikipedia.org/wiki/H.264/MPEG-4_AVC#Levels\n '
if ('quantization' in options):
warnings.warn(PiCameraDeprecated('The quantization option is deprecated; please use quality instead (same value)'))
with self._encoders_lock:
(camera_port, output_port) = self._get_ports(True, splitter_port)
format = self._get_video_format(output, format)
encoder = self._get_video_encoder(camera_port, output_port, format, resize, **options)
self._encoders[splitter_port] = encoder
try:
encoder.start(output, options.get('motion_output'))
except Exception as e:
encoder.close()
with self._encoders_lock:
del self._encoders[splitter_port]
raise | -7,157,117,653,464,527,000 | Start recording video from the camera, storing it in *output*.
If *output* is a string, it will be treated as a filename for a new
file which the video will be written to. If *output* is not a string,
but is an object with a ``write`` method, it is assumed to be a
file-like object and the video data is appended to it (the
implementation only assumes the object has a ``write()`` method - no
other methods are required but ``flush`` will be called at the end of
recording if it is present). If *output* is not a string, and has no
``write`` method it is assumed to be a writeable object implementing
the buffer protocol. In this case, the video frames will be written
sequentially to the underlying buffer (which must be large enough to
accept all frame data).
If *format* is ``None`` (the default), the method will attempt to guess
the required video format from the extension of *output* (if it's a
string), or from the *name* attribute of *output* (if it has one). In
the case that the format cannot be determined, a
:exc:`PiCameraValueError` will be raised.
If *format* is not ``None``, it must be a string specifying the format
that you want the video output in. The format can be a MIME-type or
one of the following strings:
* ``'h264'`` - Write an H.264 video stream
* ``'mjpeg'`` - Write an M-JPEG video stream
* ``'yuv'`` - Write the raw video data to a file in YUV420 format
* ``'rgb'`` - Write the raw video data to a file in 24-bit RGB format
* ``'rgba'`` - Write the raw video data to a file in 32-bit RGBA format
* ``'bgr'`` - Write the raw video data to a file in 24-bit BGR format
* ``'bgra'`` - Write the raw video data to a file in 32-bit BGRA format
If *resize* is not ``None`` (the default), it must be a two-element
tuple specifying the width and height that the video recording should
be resized to. This is particularly useful for recording video using
the full resolution of the camera sensor (which is not possible in
H.264 without down-sizing the output).
The *splitter_port* parameter specifies the port of the built-in
splitter that the video encoder will be attached to. This defaults to
``1`` and most users will have no need to specify anything different.
If you wish to record multiple (presumably resized) streams
simultaneously, specify a value between ``0`` and ``3`` inclusive for
this parameter, ensuring that you do not specify a port that is
currently in use.
Certain formats accept additional options which can be specified
as keyword arguments. The ``'h264'`` format accepts the following
additional options:
* *profile* - The H.264 profile to use for encoding. Defaults to
'high', but can be one of 'baseline', 'main', 'extended', 'high', or
'constrained'.
* *level* - The `H.264 level`_ to use for encoding. Defaults to '4',
but can be any H.264 level up to '4.2'.
* *intra_period* - The key frame rate (the rate at which I-frames are
inserted in the output). Defaults to ``None``, but can be any 32-bit
integer value representing the number of frames between successive
I-frames. The special value 0 causes the encoder to produce a single
initial I-frame, and then only P-frames subsequently. Note that
:meth:`split_recording` will fail in this mode.
* *intra_refresh* - The key frame format (the way in which I-frames
will be inserted into the output stream). Defaults to ``None``, but
can be one of 'cyclic', 'adaptive', 'both', or 'cyclicrows'.
* *inline_headers* - When ``True``, specifies that the encoder should
output SPS/PPS headers within the stream to ensure GOPs (groups of
pictures) are self describing. This is important for streaming
applications where the client may wish to seek within the stream, and
enables the use of :meth:`split_recording`. Defaults to ``True`` if
not specified.
* *sei* - When ``True``, specifies the encoder should include
"Supplemental Enhancement Information" within the output stream.
Defaults to ``False`` if not specified.
* *sps_timing* - When ``True`` the encoder includes the camera's
framerate in the SPS header. Defaults to ``False`` if not specified.
* *motion_output* - Indicates the output destination for motion vector
estimation data. When ``None`` (the default), motion data is not
output. Otherwise, this can be a filename string, a file-like object,
or a writeable buffer object (as with the *output* parameter).
All encoded formats accept the following additional options:
* *bitrate* - The bitrate at which video will be encoded. Defaults to
17000000 (17Mbps) if not specified. The maximum value depends on the
selected `H.264 level`_ and profile. Bitrate 0 indicates the encoder
should not use bitrate control (the encoder is limited by the quality
only).
* *quality* - Specifies the quality that the encoder should attempt
to maintain. For the ``'h264'`` format, use values between 10 and 40
where 10 is extremely high quality, and 40 is extremely low (20-25 is
usually a reasonable range for H.264 encoding). For the ``mjpeg``
format, use JPEG quality values between 1 and 100 (where higher
values are higher quality). Quality 0 is special and seems to be
a "reasonable quality" default.
* *quantization* - Deprecated alias for *quality*.
.. versionchanged:: 1.0
The *resize* parameter was added, and ``'mjpeg'`` was added as a
recording format
.. versionchanged:: 1.3
The *splitter_port* parameter was added
.. versionchanged:: 1.5
The *quantization* parameter was deprecated in favor of *quality*,
and the *motion_output* parameter was added.
.. versionchanged:: 1.11
Support for buffer outputs was added.
.. _H.264 level: https://en.wikipedia.org/wiki/H.264/MPEG-4_AVC#Levels | picamera/camera.py | start_recording | RobertLucian/picamera | python | def start_recording(self, output, format=None, resize=None, splitter_port=1, **options):
'\n Start recording video from the camera, storing it in *output*.\n\n If *output* is a string, it will be treated as a filename for a new\n file which the video will be written to. If *output* is not a string,\n but is an object with a ``write`` method, it is assumed to be a\n file-like object and the video data is appended to it (the\n implementation only assumes the object has a ``write()`` method - no\n other methods are required but ``flush`` will be called at the end of\n recording if it is present). If *output* is not a string, and has no\n ``write`` method it is assumed to be a writeable object implementing\n the buffer protocol. In this case, the video frames will be written\n sequentially to the underlying buffer (which must be large enough to\n accept all frame data).\n\n If *format* is ``None`` (the default), the method will attempt to guess\n the required video format from the extension of *output* (if it\'s a\n string), or from the *name* attribute of *output* (if it has one). In\n the case that the format cannot be determined, a\n :exc:`PiCameraValueError` will be raised.\n\n If *format* is not ``None``, it must be a string specifying the format\n that you want the video output in. The format can be a MIME-type or\n one of the following strings:\n\n * ``\'h264\'`` - Write an H.264 video stream\n * ``\'mjpeg\'`` - Write an M-JPEG video stream\n * ``\'yuv\'`` - Write the raw video data to a file in YUV420 format\n * ``\'rgb\'`` - Write the raw video data to a file in 24-bit RGB format\n * ``\'rgba\'`` - Write the raw video data to a file in 32-bit RGBA format\n * ``\'bgr\'`` - Write the raw video data to a file in 24-bit BGR format\n * ``\'bgra\'`` - Write the raw video data to a file in 32-bit BGRA format\n\n If *resize* is not ``None`` (the default), it must be a two-element\n tuple specifying the width and height that the video recording should\n be resized to. This is particularly useful for recording video using\n the full resolution of the camera sensor (which is not possible in\n H.264 without down-sizing the output).\n\n The *splitter_port* parameter specifies the port of the built-in\n splitter that the video encoder will be attached to. This defaults to\n ``1`` and most users will have no need to specify anything different.\n If you wish to record multiple (presumably resized) streams\n simultaneously, specify a value between ``0`` and ``3`` inclusive for\n this parameter, ensuring that you do not specify a port that is\n currently in use.\n\n Certain formats accept additional options which can be specified\n as keyword arguments. The ``\'h264\'`` format accepts the following\n additional options:\n\n * *profile* - The H.264 profile to use for encoding. Defaults to\n \'high\', but can be one of \'baseline\', \'main\', \'extended\', \'high\', or\n \'constrained\'.\n\n * *level* - The `H.264 level`_ to use for encoding. Defaults to \'4\',\n but can be any H.264 level up to \'4.2\'.\n\n * *intra_period* - The key frame rate (the rate at which I-frames are\n inserted in the output). Defaults to ``None``, but can be any 32-bit\n integer value representing the number of frames between successive\n I-frames. The special value 0 causes the encoder to produce a single\n initial I-frame, and then only P-frames subsequently. Note that\n :meth:`split_recording` will fail in this mode.\n\n * *intra_refresh* - The key frame format (the way in which I-frames\n will be inserted into the output stream). 
Defaults to ``None``, but\n can be one of \'cyclic\', \'adaptive\', \'both\', or \'cyclicrows\'.\n\n * *inline_headers* - When ``True``, specifies that the encoder should\n output SPS/PPS headers within the stream to ensure GOPs (groups of\n pictures) are self describing. This is important for streaming\n applications where the client may wish to seek within the stream, and\n enables the use of :meth:`split_recording`. Defaults to ``True`` if\n not specified.\n\n * *sei* - When ``True``, specifies the encoder should include\n "Supplemental Enhancement Information" within the output stream.\n Defaults to ``False`` if not specified.\n\n * *sps_timing* - When ``True`` the encoder includes the camera\'s\n framerate in the SPS header. Defaults to ``False`` if not specified.\n\n * *motion_output* - Indicates the output destination for motion vector\n estimation data. When ``None`` (the default), motion data is not\n output. Otherwise, this can be a filename string, a file-like object,\n or a writeable buffer object (as with the *output* parameter).\n\n All encoded formats accept the following additional options:\n\n * *bitrate* - The bitrate at which video will be encoded. Defaults to\n 17000000 (17Mbps) if not specified. The maximum value depends on the\n selected `H.264 level`_ and profile. Bitrate 0 indicates the encoder\n should not use bitrate control (the encoder is limited by the quality\n only).\n\n * *quality* - Specifies the quality that the encoder should attempt\n to maintain. For the ``\'h264\'`` format, use values between 10 and 40\n where 10 is extremely high quality, and 40 is extremely low (20-25 is\n usually a reasonable range for H.264 encoding). For the ``mjpeg``\n format, use JPEG quality values between 1 and 100 (where higher\n values are higher quality). Quality 0 is special and seems to be\n a "reasonable quality" default.\n\n * *quantization* - Deprecated alias for *quality*.\n\n .. versionchanged:: 1.0\n The *resize* parameter was added, and ``\'mjpeg\'`` was added as a\n recording format\n\n .. versionchanged:: 1.3\n The *splitter_port* parameter was added\n\n .. versionchanged:: 1.5\n The *quantization* parameter was deprecated in favor of *quality*,\n and the *motion_output* parameter was added.\n\n .. versionchanged:: 1.11\n Support for buffer outputs was added.\n\n .. _H.264 level: https://en.wikipedia.org/wiki/H.264/MPEG-4_AVC#Levels\n '
if ('quantization' in options):
warnings.warn(PiCameraDeprecated('The quantization option is deprecated; please use quality instead (same value)'))
with self._encoders_lock:
(camera_port, output_port) = self._get_ports(True, splitter_port)
format = self._get_video_format(output, format)
encoder = self._get_video_encoder(camera_port, output_port, format, resize, **options)
self._encoders[splitter_port] = encoder
try:
encoder.start(output, options.get('motion_output'))
except Exception as e:
encoder.close()
with self._encoders_lock:
del self._encoders[splitter_port]
raise |
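A typical recording built from these options (a sketch, assuming an attached camera and enough free disk space; the filename is illustrative):

import picamera

with picamera.PiCamera(resolution=(1920, 1080), framerate=30) as camera:
    # Record 1080p input but downsize the H.264 output to 720p; bitrate=0
    # disables rate control so the quality setting alone drives the encoder.
    camera.start_recording('clip.h264', format='h264',
                           resize=(1280, 720), bitrate=0, quality=23)
    camera.wait_recording(30)   # record ~30 s, re-raising any encoder error
    camera.stop_recording()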
def split_recording(self, output, splitter_port=1, **options):
'\n Continue the recording in the specified output; close existing output.\n\n When called, the video encoder will wait for the next appropriate\n split point (an inline SPS header), then will cease writing to the\n current output (and close it, if it was specified as a filename), and\n continue writing to the newly specified *output*.\n\n The *output* parameter is treated as in the :meth:`start_recording`\n method (it can be a string, a file-like object, or a writeable\n buffer object).\n\n The *motion_output* parameter can be used to redirect the output of the\n motion vector data in the same fashion as *output*. If *motion_output*\n is ``None`` (the default) then motion vector data will not be\n redirected and will continue being written to the output specified by\n the *motion_output* parameter given to :meth:`start_recording`.\n Alternatively, if you only wish to redirect motion vector data, you can\n set *output* to ``None`` and given a new value for *motion_output*.\n\n The *splitter_port* parameter specifies which port of the video\n splitter the encoder you wish to change outputs is attached to. This\n defaults to ``1`` and most users will have no need to specify anything\n different. Valid values are between ``0`` and ``3`` inclusive.\n\n Note that unlike :meth:`start_recording`, you cannot specify format or\n other options as these cannot be changed in the middle of recording.\n Only the new *output* (and *motion_output*) can be specified.\n Furthermore, the format of the recording is currently limited to H264,\n and *inline_headers* must be ``True`` when :meth:`start_recording` is\n called (this is the default).\n\n .. versionchanged:: 1.3\n The *splitter_port* parameter was added\n\n .. versionchanged:: 1.5\n The *motion_output* parameter was added\n\n .. versionchanged:: 1.11\n Support for buffer outputs was added.\n '
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(('There is no recording in progress on port %d' % splitter_port))
else:
encoder.split(output, options.get('motion_output')) | 3,772,228,076,396,274,000 | Continue the recording in the specified output; close existing output.
When called, the video encoder will wait for the next appropriate
split point (an inline SPS header), then will cease writing to the
current output (and close it, if it was specified as a filename), and
continue writing to the newly specified *output*.
The *output* parameter is treated as in the :meth:`start_recording`
method (it can be a string, a file-like object, or a writeable
buffer object).
The *motion_output* parameter can be used to redirect the output of the
motion vector data in the same fashion as *output*. If *motion_output*
is ``None`` (the default) then motion vector data will not be
redirected and will continue being written to the output specified by
the *motion_output* parameter given to :meth:`start_recording`.
Alternatively, if you only wish to redirect motion vector data, you can
set *output* to ``None`` and give a new value for *motion_output*.
The *splitter_port* parameter specifies which port of the video
splitter the encoder you wish to change outputs is attached to. This
defaults to ``1`` and most users will have no need to specify anything
different. Valid values are between ``0`` and ``3`` inclusive.
Note that unlike :meth:`start_recording`, you cannot specify format or
other options as these cannot be changed in the middle of recording.
Only the new *output* (and *motion_output*) can be specified.
Furthermore, the format of the recording is currently limited to H264,
and *inline_headers* must be ``True`` when :meth:`start_recording` is
called (this is the default).
.. versionchanged:: 1.3
The *splitter_port* parameter was added
.. versionchanged:: 1.5
The *motion_output* parameter was added
.. versionchanged:: 1.11
Support for buffer outputs was added. | picamera/camera.py | split_recording | RobertLucian/picamera | python | def split_recording(self, output, splitter_port=1, **options):
'\n Continue the recording in the specified output; close existing output.\n\n When called, the video encoder will wait for the next appropriate\n split point (an inline SPS header), then will cease writing to the\n current output (and close it, if it was specified as a filename), and\n continue writing to the newly specified *output*.\n\n The *output* parameter is treated as in the :meth:`start_recording`\n method (it can be a string, a file-like object, or a writeable\n buffer object).\n\n The *motion_output* parameter can be used to redirect the output of the\n motion vector data in the same fashion as *output*. If *motion_output*\n is ``None`` (the default) then motion vector data will not be\n redirected and will continue being written to the output specified by\n the *motion_output* parameter given to :meth:`start_recording`.\n Alternatively, if you only wish to redirect motion vector data, you can\n set *output* to ``None`` and given a new value for *motion_output*.\n\n The *splitter_port* parameter specifies which port of the video\n splitter the encoder you wish to change outputs is attached to. This\n defaults to ``1`` and most users will have no need to specify anything\n different. Valid values are between ``0`` and ``3`` inclusive.\n\n Note that unlike :meth:`start_recording`, you cannot specify format or\n other options as these cannot be changed in the middle of recording.\n Only the new *output* (and *motion_output*) can be specified.\n Furthermore, the format of the recording is currently limited to H264,\n and *inline_headers* must be ``True`` when :meth:`start_recording` is\n called (this is the default).\n\n .. versionchanged:: 1.3\n The *splitter_port* parameter was added\n\n .. versionchanged:: 1.5\n The *motion_output* parameter was added\n\n .. versionchanged:: 1.11\n Support for buffer outputs was added.\n '
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(('There is no recording in progress on port %d' % splitter_port))
else:
encoder.split(output, options.get('motion_output')) |
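The canonical use of split_recording is chunking one continuous recording into sequential files; a brief sketch (segment filenames are illustrative):

import picamera

with picamera.PiCamera() as camera:
    camera.start_recording('segment_000.h264')
    for i in range(1, 6):
        # Roughly 60 seconds per segment; the switch happens at the next
        # SPS header, so no frames are lost between files.
        camera.wait_recording(60)
        camera.split_recording('segment_%03d.h264' % i)
    camera.wait_recording(60)
    camera.stop_recording()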
def request_key_frame(self, splitter_port=1):
"\n Request the encoder generate a key-frame as soon as possible.\n\n When called, the video encoder running on the specified *splitter_port*\n will attempt to produce a key-frame (full-image frame) as soon as\n possible. The *splitter_port* defaults to ``1``. Valid values are\n between ``0`` and ``3`` inclusive.\n\n .. note::\n\n This method is only meaningful for recordings encoded in the H264\n format as MJPEG produces full frames for every frame recorded.\n Furthermore, there's no guarantee that the *next* frame will be\n a key-frame; this is simply a request to produce one as soon as\n possible after the call.\n\n .. versionadded:: 1.11\n "
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(('There is no recording in progress on port %d' % splitter_port))
else:
encoder.request_key_frame() | -5,464,494,046,034,014,000 | Request the encoder generate a key-frame as soon as possible.
When called, the video encoder running on the specified *splitter_port*
will attempt to produce a key-frame (full-image frame) as soon as
possible. The *splitter_port* defaults to ``1``. Valid values are
between ``0`` and ``3`` inclusive.
.. note::
This method is only meaningful for recordings encoded in the H264
format as MJPEG produces full frames for every frame recorded.
Furthermore, there's no guarantee that the *next* frame will be
a key-frame; this is simply a request to produce one as soon as
possible after the call.
.. versionadded:: 1.11 | picamera/camera.py | request_key_frame | RobertLucian/picamera | python | def request_key_frame(self, splitter_port=1):
"\n Request the encoder generate a key-frame as soon as possible.\n\n When called, the video encoder running on the specified *splitter_port*\n will attempt to produce a key-frame (full-image frame) as soon as\n possible. The *splitter_port* defaults to ``1``. Valid values are\n between ``0`` and ``3`` inclusive.\n\n .. note::\n\n This method is only meaningful for recordings encoded in the H264\n format as MJPEG produces full frames for every frame recorded.\n Furthermore, there's no guarantee that the *next* frame will be\n a key-frame; this is simply a request to produce one as soon as\n possible after the call.\n\n .. versionadded:: 1.11\n "
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(('There is no recording in progress on port %d' % splitter_port))
else:
encoder.request_key_frame() |
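As a rough illustration only, request_key_frame might be called while an H264 recording is running, for instance when a hypothetical client_connected() event fires and a new viewer needs a full frame to start decoding::

    import picamera

    with picamera.PiCamera() as camera:
        camera.start_recording('stream.h264')
        try:
            for _ in range(30):
                camera.wait_recording(1)
                if client_connected():            # hypothetical application event
                    camera.request_key_frame()    # ask the encoder for a full frame
        finally:
            camera.stop_recording()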
def wait_recording(self, timeout=0, splitter_port=1):
'\n Wait on the video encoder for timeout seconds.\n\n It is recommended that this method is called while recording to check\n for exceptions. If an error occurs during recording (for example out of\n disk space) the recording will stop, but an exception will only be\n raised when the :meth:`wait_recording` or :meth:`stop_recording`\n methods are called.\n\n If ``timeout`` is 0 (the default) the function will immediately return\n (or raise an exception if an error has occurred).\n\n The *splitter_port* parameter specifies which port of the video\n splitter the encoder you wish to wait on is attached to. This\n defaults to ``1`` and most users will have no need to specify anything\n different. Valid values are between ``0`` and ``3`` inclusive.\n\n .. versionchanged:: 1.3\n The *splitter_port* parameter was added\n '
assert (timeout is not None)
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(('There is no recording in progress on port %d' % splitter_port))
else:
encoder.wait(timeout) | 5,793,073,833,200,230,000 | Wait on the video encoder for timeout seconds.
It is recommended that this method is called while recording to check
for exceptions. If an error occurs during recording (for example out of
disk space) the recording will stop, but an exception will only be
raised when the :meth:`wait_recording` or :meth:`stop_recording`
methods are called.
If ``timeout`` is 0 (the default) the function will immediately return
(or raise an exception if an error has occurred).
The *splitter_port* parameter specifies which port of the video
splitter the encoder you wish to wait on is attached to. This
defaults to ``1`` and most users will have no need to specify anything
different. Valid values are between ``0`` and ``3`` inclusive.
.. versionchanged:: 1.3
The *splitter_port* parameter was added | picamera/camera.py | wait_recording | RobertLucian/picamera | python | def wait_recording(self, timeout=0, splitter_port=1):
'\n Wait on the video encoder for timeout seconds.\n\n It is recommended that this method is called while recording to check\n for exceptions. If an error occurs during recording (for example out of\n disk space) the recording will stop, but an exception will only be\n raised when the :meth:`wait_recording` or :meth:`stop_recording`\n methods are called.\n\n If ``timeout`` is 0 (the default) the function will immediately return\n (or raise an exception if an error has occurred).\n\n The *splitter_port* parameter specifies which port of the video\n splitter the encoder you wish to wait on is attached to. This\n defaults to ``1`` and most users will have no need to specify anything\n different. Valid values are between ``0`` and ``3`` inclusive.\n\n .. versionchanged:: 1.3\n The *splitter_port* parameter was added\n '
assert (timeout is not None)
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(('There is no recording in progress on port %d' % splitter_port))
else:
encoder.wait(timeout) |
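A small sketch of the recommended pattern above: call wait_recording periodically during a recording so that encoder errors are raised promptly (the filename is a placeholder)::

    import picamera

    with picamera.PiCamera() as camera:
        camera.start_recording('video.h264')
        try:
            for _ in range(60):           # record for roughly 60 seconds
                camera.wait_recording(1)  # raises if the encoder hit an error
        finally:
            camera.stop_recording()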
def stop_recording(self, splitter_port=1):
'\n Stop recording video from the camera.\n\n After calling this method the video encoder will be shut down and\n output will stop being written to the file-like object specified with\n :meth:`start_recording`. If an error occurred during recording and\n :meth:`wait_recording` has not been called since the error then this\n method will raise the exception.\n\n The *splitter_port* parameter specifies which port of the video\n splitter the encoder you wish to stop is attached to. This defaults to\n ``1`` and most users will have no need to specify anything different.\n Valid values are between ``0`` and ``3`` inclusive.\n\n .. versionchanged:: 1.3\n The *splitter_port* parameter was added\n '
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(('There is no recording in progress on port %d' % splitter_port))
else:
try:
self.wait_recording(0, splitter_port)
finally:
encoder.close()
with self._encoders_lock:
del self._encoders[splitter_port] | 3,910,143,239,972,328,000 | Stop recording video from the camera.
After calling this method the video encoder will be shut down and
output will stop being written to the file-like object specified with
:meth:`start_recording`. If an error occurred during recording and
:meth:`wait_recording` has not been called since the error then this
method will raise the exception.
The *splitter_port* parameter specifies which port of the video
splitter the encoder you wish to stop is attached to. This defaults to
``1`` and most users will have no need to specify anything different.
Valid values are between ``0`` and ``3`` inclusive.
.. versionchanged:: 1.3
The *splitter_port* parameter was added | picamera/camera.py | stop_recording | RobertLucian/picamera | python | def stop_recording(self, splitter_port=1):
'\n Stop recording video from the camera.\n\n After calling this method the video encoder will be shut down and\n output will stop being written to the file-like object specified with\n :meth:`start_recording`. If an error occurred during recording and\n :meth:`wait_recording` has not been called since the error then this\n method will raise the exception.\n\n The *splitter_port* parameter specifies which port of the video\n splitter the encoder you wish to stop is attached to. This defaults to\n ``1`` and most users will have no need to specify anything different.\n Valid values are between ``0`` and ``3`` inclusive.\n\n .. versionchanged:: 1.3\n The *splitter_port* parameter was added\n '
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(('There is no recording in progress on port %d' % splitter_port))
else:
try:
self.wait_recording(0, splitter_port)
finally:
encoder.close()
with self._encoders_lock:
del self._encoders[splitter_port] |
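Because errors that occur mid-recording are only raised from wait_recording or stop_recording, a defensive sketch might look like this (catching IOError is an assumption about the failure mode, e.g. a full disk)::

    import picamera

    with picamera.PiCamera() as camera:
        camera.start_recording('video.h264')
        try:
            camera.wait_recording(30)
            camera.stop_recording()
        except IOError:
            # deferred recording errors surface here
            print('recording failed')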
def record_sequence(self, outputs, format='h264', resize=None, splitter_port=1, **options):
"\n Record a sequence of video clips from the camera.\n\n This method accepts a sequence or iterator of *outputs* each of which\n must either be a string specifying a filename for output, or a\n file-like object with a ``write`` method.\n\n The method acts as an iterator itself, yielding each item of the\n sequence in turn. In this way, the caller can control how long to\n record to each item by only permitting the loop to continue when ready\n to switch to the next output.\n\n The *format*, *splitter_port*, *resize*, and *options* parameters are\n the same as in :meth:`start_recording`, but *format* defaults to\n ``'h264'``. The format is **not** derived from the filenames in\n *outputs* by this method.\n\n For example, to record 3 consecutive 10-second video clips, writing the\n output to a series of H.264 files named clip01.h264, clip02.h264, and\n clip03.h264 one could use the following::\n\n import picamera\n with picamera.PiCamera() as camera:\n for filename in camera.record_sequence([\n 'clip01.h264',\n 'clip02.h264',\n 'clip03.h264']):\n print('Recording to %s' % filename)\n camera.wait_recording(10)\n\n Alternatively, a more flexible method of writing the previous example\n (which is easier to expand to a large number of output files) is by\n using a generator expression as the input sequence::\n\n import picamera\n with picamera.PiCamera() as camera:\n for filename in camera.record_sequence(\n 'clip%02d.h264' % i for i in range(3)):\n print('Recording to %s' % filename)\n camera.wait_recording(10)\n\n More advanced techniques are also possible by utilising infinite\n sequences, such as those generated by :func:`itertools.cycle`. In the\n following example, recording is switched between two in-memory streams.\n Whilst one stream is recording, the other is being analysed. The script\n only stops recording when a video recording meets some criteria defined\n by the ``process`` function::\n\n import io\n import itertools\n import picamera\n with picamera.PiCamera() as camera:\n analyse = None\n for stream in camera.record_sequence(\n itertools.cycle((io.BytesIO(), io.BytesIO()))):\n if analyse is not None:\n if process(analyse):\n break\n analyse.seek(0)\n analyse.truncate()\n camera.wait_recording(5)\n analyse = stream\n\n .. versionadded:: 1.3\n "
with self._encoders_lock:
(camera_port, output_port) = self._get_ports(True, splitter_port)
format = self._get_video_format('', format)
encoder = self._get_video_encoder(camera_port, output_port, format, resize, **options)
self._encoders[splitter_port] = encoder
try:
start = True
for output in outputs:
if start:
start = False
encoder.start(output, options.get('motion_output'))
else:
encoder.split(output)
(yield output)
finally:
try:
encoder.wait(0)
finally:
encoder.close()
with self._encoders_lock:
del self._encoders[splitter_port] | 7,498,983,051,958,845,000 | Record a sequence of video clips from the camera.
This method accepts a sequence or iterator of *outputs* each of which
must either be a string specifying a filename for output, or a
file-like object with a ``write`` method.
The method acts as an iterator itself, yielding each item of the
sequence in turn. In this way, the caller can control how long to
record to each item by only permitting the loop to continue when ready
to switch to the next output.
The *format*, *splitter_port*, *resize*, and *options* parameters are
the same as in :meth:`start_recording`, but *format* defaults to
``'h264'``. The format is **not** derived from the filenames in
*outputs* by this method.
For example, to record 3 consecutive 10-second video clips, writing the
output to a series of H.264 files named clip01.h264, clip02.h264, and
clip03.h264 one could use the following::
import picamera
with picamera.PiCamera() as camera:
for filename in camera.record_sequence([
'clip01.h264',
'clip02.h264',
'clip03.h264']):
print('Recording to %s' % filename)
camera.wait_recording(10)
Alternatively, a more flexible method of writing the previous example
(which is easier to expand to a large number of output files) is by
using a generator expression as the input sequence::
import picamera
with picamera.PiCamera() as camera:
for filename in camera.record_sequence(
'clip%02d.h264' % i for i in range(3)):
print('Recording to %s' % filename)
camera.wait_recording(10)
More advanced techniques are also possible by utilising infinite
sequences, such as those generated by :func:`itertools.cycle`. In the
following example, recording is switched between two in-memory streams.
Whilst one stream is recording, the other is being analysed. The script
only stops recording when a video recording meets some criteria defined
by the ``process`` function::
import io
import itertools
import picamera
with picamera.PiCamera() as camera:
analyse = None
for stream in camera.record_sequence(
itertools.cycle((io.BytesIO(), io.BytesIO()))):
if analyse is not None:
if process(analyse):
break
analyse.seek(0)
analyse.truncate()
camera.wait_recording(5)
analyse = stream
.. versionadded:: 1.3 | picamera/camera.py | record_sequence | RobertLucian/picamera | python | def record_sequence(self, outputs, format='h264', resize=None, splitter_port=1, **options):
"\n Record a sequence of video clips from the camera.\n\n This method accepts a sequence or iterator of *outputs* each of which\n must either be a string specifying a filename for output, or a\n file-like object with a ``write`` method.\n\n The method acts as an iterator itself, yielding each item of the\n sequence in turn. In this way, the caller can control how long to\n record to each item by only permitting the loop to continue when ready\n to switch to the next output.\n\n The *format*, *splitter_port*, *resize*, and *options* parameters are\n the same as in :meth:`start_recording`, but *format* defaults to\n ``'h264'``. The format is **not** derived from the filenames in\n *outputs* by this method.\n\n For example, to record 3 consecutive 10-second video clips, writing the\n output to a series of H.264 files named clip01.h264, clip02.h264, and\n clip03.h264 one could use the following::\n\n import picamera\n with picamera.PiCamera() as camera:\n for filename in camera.record_sequence([\n 'clip01.h264',\n 'clip02.h264',\n 'clip03.h264']):\n print('Recording to %s' % filename)\n camera.wait_recording(10)\n\n Alternatively, a more flexible method of writing the previous example\n (which is easier to expand to a large number of output files) is by\n using a generator expression as the input sequence::\n\n import picamera\n with picamera.PiCamera() as camera:\n for filename in camera.record_sequence(\n 'clip%02d.h264' % i for i in range(3)):\n print('Recording to %s' % filename)\n camera.wait_recording(10)\n\n More advanced techniques are also possible by utilising infinite\n sequences, such as those generated by :func:`itertools.cycle`. In the\n following example, recording is switched between two in-memory streams.\n Whilst one stream is recording, the other is being analysed. The script\n only stops recording when a video recording meets some criteria defined\n by the ``process`` function::\n\n import io\n import itertools\n import picamera\n with picamera.PiCamera() as camera:\n analyse = None\n for stream in camera.record_sequence(\n itertools.cycle((io.BytesIO(), io.BytesIO()))):\n if analyse is not None:\n if process(analyse):\n break\n analyse.seek(0)\n analyse.truncate()\n camera.wait_recording(5)\n analyse = stream\n\n .. versionadded:: 1.3\n "
with self._encoders_lock:
(camera_port, output_port) = self._get_ports(True, splitter_port)
format = self._get_video_format('', format)
encoder = self._get_video_encoder(camera_port, output_port, format, resize, **options)
self._encoders[splitter_port] = encoder
try:
start = True
for output in outputs:
if start:
start = False
encoder.start(output, options.get('motion_output'))
else:
encoder.split(output)
(yield output)
finally:
try:
encoder.wait(0)
finally:
encoder.close()
with self._encoders_lock:
del self._encoders[splitter_port] |
def capture(self, output, format=None, use_video_port=False, resize=None, splitter_port=0, bayer=False, **options):
'\n Capture an image from the camera, storing it in *output*.\n\n If *output* is a string, it will be treated as a filename for a new\n file which the image will be written to. If *output* is not a string,\n but is an object with a ``write`` method, it is assumed to be a\n file-like object and the image data is appended to it (the\n implementation only assumes the object has a ``write`` method - no\n other methods are required but ``flush`` will be called at the end of\n capture if it is present). If *output* is not a string, and has no\n ``write`` method it is assumed to be a writeable object implementing\n the buffer protocol. In this case, the image data will be written\n directly to the underlying buffer (which must be large enough to accept\n the image data).\n\n If *format* is ``None`` (the default), the method will attempt to guess\n the required image format from the extension of *output* (if it\'s a\n string), or from the *name* attribute of *output* (if it has one). In\n the case that the format cannot be determined, a\n :exc:`PiCameraValueError` will be raised.\n\n If *format* is not ``None``, it must be a string specifying the format\n that you want the image output in. The format can be a MIME-type or\n one of the following strings:\n\n * ``\'jpeg\'`` - Write a JPEG file\n * ``\'png\'`` - Write a PNG file\n * ``\'gif\'`` - Write a GIF file\n * ``\'bmp\'`` - Write a Windows bitmap file\n * ``\'yuv\'`` - Write the raw image data to a file in YUV420 format\n * ``\'rgb\'`` - Write the raw image data to a file in 24-bit RGB format\n * ``\'rgba\'`` - Write the raw image data to a file in 32-bit RGBA format\n * ``\'bgr\'`` - Write the raw image data to a file in 24-bit BGR format\n * ``\'bgra\'`` - Write the raw image data to a file in 32-bit BGRA format\n * ``\'raw\'`` - Deprecated option for raw captures; the format is taken\n from the deprecated :attr:`raw_format` attribute\n\n The *use_video_port* parameter controls whether the camera\'s image or\n video port is used to capture images. It defaults to ``False`` which\n means that the camera\'s image port is used. This port is slow but\n produces better quality pictures. If you need rapid capture up to the\n rate of video frames, set this to ``True``.\n\n When *use_video_port* is ``True``, the *splitter_port* parameter\n specifies the port of the video splitter that the image encoder will be\n attached to. This defaults to ``0`` and most users will have no need to\n specify anything different. This parameter is ignored when\n *use_video_port* is ``False``. See :ref:`mmal` for more information\n about the video splitter.\n\n If *resize* is not ``None`` (the default), it must be a two-element\n tuple specifying the width and height that the image should be resized\n to.\n\n .. warning::\n\n If *resize* is specified, or *use_video_port* is ``True``, Exif\n metadata will **not** be included in JPEG output. This is due to an\n underlying firmware limitation.\n\n Certain file formats accept additional options which can be specified\n as keyword arguments. Currently, only the ``\'jpeg\'`` encoder accepts\n additional options, which are:\n\n * *quality* - Defines the quality of the JPEG encoder as an integer\n ranging from 1 to 100. Defaults to 85. Please note that JPEG quality\n is not a percentage and `definitions of quality`_ vary widely.\n\n * *restart* - Defines the restart interval for the JPEG encoder as a\n number of JPEG MCUs. 
The actual restart interval used will be a\n multiple of the number of MCUs per row in the resulting image.\n\n * *thumbnail* - Defines the size and quality of the thumbnail to embed\n in the Exif metadata. Specifying ``None`` disables thumbnail\n generation. Otherwise, specify a tuple of ``(width, height,\n quality)``. Defaults to ``(64, 48, 35)``.\n\n * *bayer* - If ``True``, the raw bayer data from the camera\'s sensor\n is included in the Exif metadata.\n\n .. note::\n\n The so-called "raw" formats listed above (``\'yuv\'``, ``\'rgb\'``,\n etc.) do not represent the raw bayer data from the camera\'s sensor.\n Rather they provide access to the image data after GPU processing,\n but before format encoding (JPEG, PNG, etc). Currently, the only\n method of accessing the raw bayer data is via the *bayer* parameter\n described above.\n\n .. versionchanged:: 1.0\n The *resize* parameter was added, and raw capture formats can now\n be specified directly\n\n .. versionchanged:: 1.3\n The *splitter_port* parameter was added, and *bayer* was added as\n an option for the ``\'jpeg\'`` format\n\n .. versionchanged:: 1.11\n Support for buffer outputs was added.\n\n .. _definitions of quality: http://photo.net/learn/jpeg/#qual\n '
if (format == 'raw'):
warnings.warn(PiCameraDeprecated('The "raw" format option is deprecated; specify the required format directly instead ("yuv", "rgb", etc.)'))
if (use_video_port and bayer):
raise PiCameraValueError('bayer is only valid with still port captures')
if ('burst' in options):
raise PiCameraValueError('burst is only valid with capture_sequence or capture_continuous')
with self._encoders_lock:
(camera_port, output_port) = self._get_ports(use_video_port, splitter_port)
format = self._get_image_format(output, format)
encoder = self._get_image_encoder(camera_port, output_port, format, resize, **options)
if use_video_port:
self._encoders[splitter_port] = encoder
try:
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(output)
if (not encoder.wait(self.CAPTURE_TIMEOUT)):
raise PiCameraRuntimeError('Timed out waiting for capture to end')
finally:
encoder.close()
with self._encoders_lock:
if use_video_port:
del self._encoders[splitter_port] | -4,667,808,940,049,474,000 | Capture an image from the camera, storing it in *output*.
If *output* is a string, it will be treated as a filename for a new
file which the image will be written to. If *output* is not a string,
but is an object with a ``write`` method, it is assumed to be a
file-like object and the image data is appended to it (the
implementation only assumes the object has a ``write`` method - no
other methods are required but ``flush`` will be called at the end of
capture if it is present). If *output* is not a string, and has no
``write`` method it is assumed to be a writeable object implementing
the buffer protocol. In this case, the image data will be written
directly to the underlying buffer (which must be large enough to accept
the image data).
If *format* is ``None`` (the default), the method will attempt to guess
the required image format from the extension of *output* (if it's a
string), or from the *name* attribute of *output* (if it has one). In
the case that the format cannot be determined, a
:exc:`PiCameraValueError` will be raised.
If *format* is not ``None``, it must be a string specifying the format
that you want the image output in. The format can be a MIME-type or
one of the following strings:
* ``'jpeg'`` - Write a JPEG file
* ``'png'`` - Write a PNG file
* ``'gif'`` - Write a GIF file
* ``'bmp'`` - Write a Windows bitmap file
* ``'yuv'`` - Write the raw image data to a file in YUV420 format
* ``'rgb'`` - Write the raw image data to a file in 24-bit RGB format
* ``'rgba'`` - Write the raw image data to a file in 32-bit RGBA format
* ``'bgr'`` - Write the raw image data to a file in 24-bit BGR format
* ``'bgra'`` - Write the raw image data to a file in 32-bit BGRA format
* ``'raw'`` - Deprecated option for raw captures; the format is taken
from the deprecated :attr:`raw_format` attribute
The *use_video_port* parameter controls whether the camera's image or
video port is used to capture images. It defaults to ``False`` which
means that the camera's image port is used. This port is slow but
produces better quality pictures. If you need rapid capture up to the
rate of video frames, set this to ``True``.
When *use_video_port* is ``True``, the *splitter_port* parameter
specifies the port of the video splitter that the image encoder will be
attached to. This defaults to ``0`` and most users will have no need to
specify anything different. This parameter is ignored when
*use_video_port* is ``False``. See :ref:`mmal` for more information
about the video splitter.
If *resize* is not ``None`` (the default), it must be a two-element
tuple specifying the width and height that the image should be resized
to.
.. warning::
If *resize* is specified, or *use_video_port* is ``True``, Exif
metadata will **not** be included in JPEG output. This is due to an
underlying firmware limitation.
Certain file formats accept additional options which can be specified
as keyword arguments. Currently, only the ``'jpeg'`` encoder accepts
additional options, which are:
* *quality* - Defines the quality of the JPEG encoder as an integer
ranging from 1 to 100. Defaults to 85. Please note that JPEG quality
is not a percentage and `definitions of quality`_ vary widely.
* *restart* - Defines the restart interval for the JPEG encoder as a
number of JPEG MCUs. The actual restart interval used will be a
multiple of the number of MCUs per row in the resulting image.
* *thumbnail* - Defines the size and quality of the thumbnail to embed
in the Exif metadata. Specifying ``None`` disables thumbnail
generation. Otherwise, specify a tuple of ``(width, height,
quality)``. Defaults to ``(64, 48, 35)``.
* *bayer* - If ``True``, the raw bayer data from the camera's sensor
is included in the Exif metadata.
.. note::
The so-called "raw" formats listed above (``'yuv'``, ``'rgb'``,
etc.) do not represent the raw bayer data from the camera's sensor.
Rather they provide access to the image data after GPU processing,
but before format encoding (JPEG, PNG, etc). Currently, the only
method of accessing the raw bayer data is via the *bayer* parameter
described above.
.. versionchanged:: 1.0
The *resize* parameter was added, and raw capture formats can now
be specified directly
.. versionchanged:: 1.3
The *splitter_port* parameter was added, and *bayer* was added as
an option for the ``'jpeg'`` format
.. versionchanged:: 1.11
Support for buffer outputs was added.
.. _definitions of quality: http://photo.net/learn/jpeg/#qual | picamera/camera.py | capture | RobertLucian/picamera | python | def capture(self, output, format=None, use_video_port=False, resize=None, splitter_port=0, bayer=False, **options):
'\n Capture an image from the camera, storing it in *output*.\n\n If *output* is a string, it will be treated as a filename for a new\n file which the image will be written to. If *output* is not a string,\n but is an object with a ``write`` method, it is assumed to be a\n file-like object and the image data is appended to it (the\n implementation only assumes the object has a ``write`` method - no\n other methods are required but ``flush`` will be called at the end of\n capture if it is present). If *output* is not a string, and has no\n ``write`` method it is assumed to be a writeable object implementing\n the buffer protocol. In this case, the image data will be written\n directly to the underlying buffer (which must be large enough to accept\n the image data).\n\n If *format* is ``None`` (the default), the method will attempt to guess\n the required image format from the extension of *output* (if it\'s a\n string), or from the *name* attribute of *output* (if it has one). In\n the case that the format cannot be determined, a\n :exc:`PiCameraValueError` will be raised.\n\n If *format* is not ``None``, it must be a string specifying the format\n that you want the image output in. The format can be a MIME-type or\n one of the following strings:\n\n * ``\'jpeg\'`` - Write a JPEG file\n * ``\'png\'`` - Write a PNG file\n * ``\'gif\'`` - Write a GIF file\n * ``\'bmp\'`` - Write a Windows bitmap file\n * ``\'yuv\'`` - Write the raw image data to a file in YUV420 format\n * ``\'rgb\'`` - Write the raw image data to a file in 24-bit RGB format\n * ``\'rgba\'`` - Write the raw image data to a file in 32-bit RGBA format\n * ``\'bgr\'`` - Write the raw image data to a file in 24-bit BGR format\n * ``\'bgra\'`` - Write the raw image data to a file in 32-bit BGRA format\n * ``\'raw\'`` - Deprecated option for raw captures; the format is taken\n from the deprecated :attr:`raw_format` attribute\n\n The *use_video_port* parameter controls whether the camera\'s image or\n video port is used to capture images. It defaults to ``False`` which\n means that the camera\'s image port is used. This port is slow but\n produces better quality pictures. If you need rapid capture up to the\n rate of video frames, set this to ``True``.\n\n When *use_video_port* is ``True``, the *splitter_port* parameter\n specifies the port of the video splitter that the image encoder will be\n attached to. This defaults to ``0`` and most users will have no need to\n specify anything different. This parameter is ignored when\n *use_video_port* is ``False``. See :ref:`mmal` for more information\n about the video splitter.\n\n If *resize* is not ``None`` (the default), it must be a two-element\n tuple specifying the width and height that the image should be resized\n to.\n\n .. warning::\n\n If *resize* is specified, or *use_video_port* is ``True``, Exif\n metadata will **not** be included in JPEG output. This is due to an\n underlying firmware limitation.\n\n Certain file formats accept additional options which can be specified\n as keyword arguments. Currently, only the ``\'jpeg\'`` encoder accepts\n additional options, which are:\n\n * *quality* - Defines the quality of the JPEG encoder as an integer\n ranging from 1 to 100. Defaults to 85. Please note that JPEG quality\n is not a percentage and `definitions of quality`_ vary widely.\n\n * *restart* - Defines the restart interval for the JPEG encoder as a\n number of JPEG MCUs. 
The actual restart interval used will be a\n multiple of the number of MCUs per row in the resulting image.\n\n * *thumbnail* - Defines the size and quality of the thumbnail to embed\n in the Exif metadata. Specifying ``None`` disables thumbnail\n generation. Otherwise, specify a tuple of ``(width, height,\n quality)``. Defaults to ``(64, 48, 35)``.\n\n * *bayer* - If ``True``, the raw bayer data from the camera\'s sensor\n is included in the Exif metadata.\n\n .. note::\n\n The so-called "raw" formats listed above (``\'yuv\'``, ``\'rgb\'``,\n etc.) do not represent the raw bayer data from the camera\'s sensor.\n Rather they provide access to the image data after GPU processing,\n but before format encoding (JPEG, PNG, etc). Currently, the only\n method of accessing the raw bayer data is via the *bayer* parameter\n described above.\n\n .. versionchanged:: 1.0\n The *resize* parameter was added, and raw capture formats can now\n be specified directly\n\n .. versionchanged:: 1.3\n The *splitter_port* parameter was added, and *bayer* was added as\n an option for the ``\'jpeg\'`` format\n\n .. versionchanged:: 1.11\n Support for buffer outputs was added.\n\n .. _definitions of quality: http://photo.net/learn/jpeg/#qual\n '
if (format == 'raw'):
warnings.warn(PiCameraDeprecated('The "raw" format option is deprecated; specify the required format directly instead ("yuv", "rgb", etc.)'))
if (use_video_port and bayer):
raise PiCameraValueError('bayer is only valid with still port captures')
if ('burst' in options):
raise PiCameraValueError('burst is only valid with capture_sequence or capture_continuous')
with self._encoders_lock:
(camera_port, output_port) = self._get_ports(use_video_port, splitter_port)
format = self._get_image_format(output, format)
encoder = self._get_image_encoder(camera_port, output_port, format, resize, **options)
if use_video_port:
self._encoders[splitter_port] = encoder
try:
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(output)
if (not encoder.wait(self.CAPTURE_TIMEOUT)):
raise PiCameraRuntimeError('Timed out waiting for capture to end')
finally:
encoder.close()
with self._encoders_lock:
if use_video_port:
del self._encoders[splitter_port] |
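A brief sketch of the capture variants described above; the filename, quality, and resize values are arbitrary examples::

    import io
    import picamera

    with picamera.PiCamera() as camera:
        # format is guessed from the extension when output is a filename
        camera.capture('photo.jpg', quality=90)

        # a file-like output needs an explicit format
        stream = io.BytesIO()
        camera.capture(stream, format='jpeg', resize=(640, 480))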
def capture_sequence(self, outputs, format='jpeg', use_video_port=False, resize=None, splitter_port=0, burst=False, bayer=False, **options):
"\n Capture a sequence of consecutive images from the camera.\n\n This method accepts a sequence or iterator of *outputs* each of which\n must either be a string specifying a filename for output, or a\n file-like object with a ``write`` method, or a writeable buffer object.\n For each item in the sequence or iterator of outputs, the camera\n captures a single image as fast as it can.\n\n The *format*, *use_video_port*, *splitter_port*, *resize*, and\n *options* parameters are the same as in :meth:`capture`, but *format*\n defaults to ``'jpeg'``. The format is **not** derived from the\n filenames in *outputs* by this method.\n\n If *use_video_port* is ``False`` (the default), the *burst* parameter\n can be used to make still port captures faster. Specifically, this\n prevents the preview from switching resolutions between captures which\n significantly speeds up consecutive captures from the still port. The\n downside is that this mode is currently has several bugs; the major\n issue is that if captures are performed too quickly some frames will\n come back severely underexposed. It is recommended that users avoid the\n *burst* parameter unless they absolutely require it and are prepared to\n work around such issues.\n\n For example, to capture 3 consecutive images::\n\n import time\n import picamera\n with picamera.PiCamera() as camera:\n camera.start_preview()\n time.sleep(2)\n camera.capture_sequence([\n 'image1.jpg',\n 'image2.jpg',\n 'image3.jpg',\n ])\n camera.stop_preview()\n\n If you wish to capture a large number of images, a list comprehension\n or generator expression can be used to construct the list of filenames\n to use::\n\n import time\n import picamera\n with picamera.PiCamera() as camera:\n camera.start_preview()\n time.sleep(2)\n camera.capture_sequence([\n 'image%02d.jpg' % i\n for i in range(100)\n ])\n camera.stop_preview()\n\n More complex effects can be obtained by using a generator function to\n provide the filenames or output objects.\n\n .. versionchanged:: 1.0\n The *resize* parameter was added, and raw capture formats can now\n be specified directly\n\n .. versionchanged:: 1.3\n The *splitter_port* parameter was added\n\n .. versionchanged:: 1.11\n Support for buffer outputs was added.\n "
if use_video_port:
if burst:
raise PiCameraValueError('burst is only valid with still port captures')
if bayer:
raise PiCameraValueError('bayer is only valid with still port captures')
with self._encoders_lock:
(camera_port, output_port) = self._get_ports(use_video_port, splitter_port)
format = self._get_image_format('', format)
if use_video_port:
encoder = self._get_images_encoder(camera_port, output_port, format, resize, **options)
self._encoders[splitter_port] = encoder
else:
encoder = self._get_image_encoder(camera_port, output_port, format, resize, **options)
try:
if use_video_port:
encoder.start(outputs)
encoder.wait()
else:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = True
try:
for output in outputs:
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(output)
if (not encoder.wait(self.CAPTURE_TIMEOUT)):
raise PiCameraRuntimeError('Timed out waiting for capture to end')
finally:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = False
finally:
encoder.close()
with self._encoders_lock:
if use_video_port:
del self._encoders[splitter_port] | 8,720,782,316,086,409,000 | Capture a sequence of consecutive images from the camera.
This method accepts a sequence or iterator of *outputs* each of which
must either be a string specifying a filename for output, or a
file-like object with a ``write`` method, or a writeable buffer object.
For each item in the sequence or iterator of outputs, the camera
captures a single image as fast as it can.
The *format*, *use_video_port*, *splitter_port*, *resize*, and
*options* parameters are the same as in :meth:`capture`, but *format*
defaults to ``'jpeg'``. The format is **not** derived from the
filenames in *outputs* by this method.
If *use_video_port* is ``False`` (the default), the *burst* parameter
can be used to make still port captures faster. Specifically, this
prevents the preview from switching resolutions between captures which
significantly speeds up consecutive captures from the still port. The
downside is that this mode currently has several bugs; the major
issue is that if captures are performed too quickly some frames will
come back severely underexposed. It is recommended that users avoid the
*burst* parameter unless they absolutely require it and are prepared to
work around such issues.
For example, to capture 3 consecutive images::
import time
import picamera
with picamera.PiCamera() as camera:
camera.start_preview()
time.sleep(2)
camera.capture_sequence([
'image1.jpg',
'image2.jpg',
'image3.jpg',
])
camera.stop_preview()
If you wish to capture a large number of images, a list comprehension
or generator expression can be used to construct the list of filenames
to use::
import time
import picamera
with picamera.PiCamera() as camera:
camera.start_preview()
time.sleep(2)
camera.capture_sequence([
'image%02d.jpg' % i
for i in range(100)
])
camera.stop_preview()
More complex effects can be obtained by using a generator function to
provide the filenames or output objects.
.. versionchanged:: 1.0
The *resize* parameter was added, and raw capture formats can now
be specified directly
.. versionchanged:: 1.3
The *splitter_port* parameter was added
.. versionchanged:: 1.11
Support for buffer outputs was added. | picamera/camera.py | capture_sequence | RobertLucian/picamera | python | def capture_sequence(self, outputs, format='jpeg', use_video_port=False, resize=None, splitter_port=0, burst=False, bayer=False, **options):
"\n Capture a sequence of consecutive images from the camera.\n\n This method accepts a sequence or iterator of *outputs* each of which\n must either be a string specifying a filename for output, or a\n file-like object with a ``write`` method, or a writeable buffer object.\n For each item in the sequence or iterator of outputs, the camera\n captures a single image as fast as it can.\n\n The *format*, *use_video_port*, *splitter_port*, *resize*, and\n *options* parameters are the same as in :meth:`capture`, but *format*\n defaults to ``'jpeg'``. The format is **not** derived from the\n filenames in *outputs* by this method.\n\n If *use_video_port* is ``False`` (the default), the *burst* parameter\n can be used to make still port captures faster. Specifically, this\n prevents the preview from switching resolutions between captures which\n significantly speeds up consecutive captures from the still port. The\n downside is that this mode is currently has several bugs; the major\n issue is that if captures are performed too quickly some frames will\n come back severely underexposed. It is recommended that users avoid the\n *burst* parameter unless they absolutely require it and are prepared to\n work around such issues.\n\n For example, to capture 3 consecutive images::\n\n import time\n import picamera\n with picamera.PiCamera() as camera:\n camera.start_preview()\n time.sleep(2)\n camera.capture_sequence([\n 'image1.jpg',\n 'image2.jpg',\n 'image3.jpg',\n ])\n camera.stop_preview()\n\n If you wish to capture a large number of images, a list comprehension\n or generator expression can be used to construct the list of filenames\n to use::\n\n import time\n import picamera\n with picamera.PiCamera() as camera:\n camera.start_preview()\n time.sleep(2)\n camera.capture_sequence([\n 'image%02d.jpg' % i\n for i in range(100)\n ])\n camera.stop_preview()\n\n More complex effects can be obtained by using a generator function to\n provide the filenames or output objects.\n\n .. versionchanged:: 1.0\n The *resize* parameter was added, and raw capture formats can now\n be specified directly\n\n .. versionchanged:: 1.3\n The *splitter_port* parameter was added\n\n .. versionchanged:: 1.11\n Support for buffer outputs was added.\n "
if use_video_port:
if burst:
raise PiCameraValueError('burst is only valid with still port captures')
if bayer:
raise PiCameraValueError('bayer is only valid with still port captures')
with self._encoders_lock:
(camera_port, output_port) = self._get_ports(use_video_port, splitter_port)
format = self._get_image_format('', format)
if use_video_port:
encoder = self._get_images_encoder(camera_port, output_port, format, resize, **options)
self._encoders[splitter_port] = encoder
else:
encoder = self._get_image_encoder(camera_port, output_port, format, resize, **options)
try:
if use_video_port:
encoder.start(outputs)
encoder.wait()
else:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = True
try:
for output in outputs:
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(output)
if (not encoder.wait(self.CAPTURE_TIMEOUT)):
raise PiCameraRuntimeError('Timed out waiting for capture to end')
finally:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = False
finally:
encoder.close()
with self._encoders_lock:
if use_video_port:
del self._encoders[splitter_port] |
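In addition to the examples in the docstring above, a sketch of the rapid-capture case it mentions, using the video port (the resolution value is an arbitrary assumption)::

    import picamera

    with picamera.PiCamera() as camera:
        camera.resolution = (640, 480)
        # the video port trades some image quality for capture rate
        camera.capture_sequence(
            ['fast%02d.jpg' % i for i in range(10)],
            use_video_port=True)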
def capture_continuous(self, output, format=None, use_video_port=False, resize=None, splitter_port=0, burst=False, bayer=False, **options):
"\n Capture images continuously from the camera as an infinite iterator.\n\n This method returns an infinite iterator of images captured\n continuously from the camera. If *output* is a string, each captured\n image is stored in a file named after *output* after substitution of\n two values with the :meth:`~str.format` method. Those two values are:\n\n * ``{counter}`` - a simple incrementor that starts at 1 and increases\n by 1 for each image taken\n\n * ``{timestamp}`` - a :class:`~datetime.datetime` instance\n\n The table below contains several example values of *output* and the\n sequence of filenames those values could produce:\n\n .. tabularcolumns:: |p{80mm}|p{40mm}|p{10mm}|\n\n +--------------------------------------------+--------------------------------------------+-------+\n | *output* Value | Filenames | Notes |\n +============================================+============================================+=======+\n | ``'image{counter}.jpg'`` | image1.jpg, image2.jpg, image3.jpg, ... | |\n +--------------------------------------------+--------------------------------------------+-------+\n | ``'image{counter:02d}.jpg'`` | image01.jpg, image02.jpg, image03.jpg, ... | |\n +--------------------------------------------+--------------------------------------------+-------+\n | ``'image{timestamp}.jpg'`` | image2013-10-05 12:07:12.346743.jpg, | (1) |\n | | image2013-10-05 12:07:32.498539, ... | |\n +--------------------------------------------+--------------------------------------------+-------+\n | ``'image{timestamp:%H-%M-%S-%f}.jpg'`` | image12-10-02-561527.jpg, | |\n | | image12-10-14-905398.jpg | |\n +--------------------------------------------+--------------------------------------------+-------+\n | ``'{timestamp:%H%M%S}-{counter:03d}.jpg'`` | 121002-001.jpg, 121013-002.jpg, | (2) |\n | | 121014-003.jpg, ... | |\n +--------------------------------------------+--------------------------------------------+-------+\n\n 1. Note that because timestamp's default output includes colons (:),\n the resulting filenames are not suitable for use on Windows. For\n this reason (and the fact the default contains spaces) it is\n strongly recommended you always specify a format when using\n ``{timestamp}``.\n\n 2. You can use both ``{timestamp}`` and ``{counter}`` in a single\n format string (multiple times too!) although this tends to be\n redundant.\n\n If *output* is not a string, but has a ``write`` method, it is assumed\n to be a file-like object and each image is simply written to this\n object sequentially. In this case you will likely either want to write\n something to the object between the images to distinguish them, or\n clear the object between iterations. If *output* is not a string, and\n has no ``write`` method, it is assumed to be a writeable object\n supporting the buffer protocol; each image is simply written to the\n buffer sequentially.\n\n The *format*, *use_video_port*, *splitter_port*, *resize*, and\n *options* parameters are the same as in :meth:`capture`.\n\n If *use_video_port* is ``False`` (the default), the *burst* parameter\n can be used to make still port captures faster. Specifically, this\n prevents the preview from switching resolutions between captures which\n significantly speeds up consecutive captures from the still port. The\n downside is that this mode is currently has several bugs; the major\n issue is that if captures are performed too quickly some frames will\n come back severely underexposed. 
It is recommended that users avoid the\n *burst* parameter unless they absolutely require it and are prepared to\n work around such issues.\n\n For example, to capture 60 images with a one second delay between them,\n writing the output to a series of JPEG files named image01.jpg,\n image02.jpg, etc. one could do the following::\n\n import time\n import picamera\n with picamera.PiCamera() as camera:\n camera.start_preview()\n try:\n for i, filename in enumerate(\n camera.capture_continuous('image{counter:02d}.jpg')):\n print(filename)\n time.sleep(1)\n if i == 59:\n break\n finally:\n camera.stop_preview()\n\n Alternatively, to capture JPEG frames as fast as possible into an\n in-memory stream, performing some processing on each stream until\n some condition is satisfied::\n\n import io\n import time\n import picamera\n with picamera.PiCamera() as camera:\n stream = io.BytesIO()\n for foo in camera.capture_continuous(stream, format='jpeg'):\n # Truncate the stream to the current position (in case\n # prior iterations output a longer image)\n stream.truncate()\n stream.seek(0)\n if process(stream):\n break\n\n .. versionchanged:: 1.0\n The *resize* parameter was added, and raw capture formats can now\n be specified directly\n\n .. versionchanged:: 1.3\n The *splitter_port* parameter was added\n\n .. versionchanged:: 1.11\n Support for buffer outputs was added.\n "
if use_video_port:
if burst:
raise PiCameraValueError('burst is only valid with still port captures')
if bayer:
raise PiCameraValueError('bayer is only valid with still port captures')
with self._encoders_lock:
(camera_port, output_port) = self._get_ports(use_video_port, splitter_port)
format = self._get_image_format(output, format)
encoder = self._get_image_encoder(camera_port, output_port, format, resize, **options)
if use_video_port:
self._encoders[splitter_port] = encoder
try:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = True
try:
if isinstance(output, bytes):
output = output.decode('utf-8')
if isinstance(output, str):
counter = 1
while True:
filename = output.format(counter=counter, timestamp=datetime.datetime.now())
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(filename)
if (not encoder.wait(self.CAPTURE_TIMEOUT)):
raise PiCameraRuntimeError('Timed out waiting for capture to end')
(yield filename)
counter += 1
else:
while True:
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(output)
if (not encoder.wait(self.CAPTURE_TIMEOUT)):
raise PiCameraRuntimeError('Timed out waiting for capture to end')
(yield output)
finally:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = False
finally:
encoder.close()
with self._encoders_lock:
if use_video_port:
del self._encoders[splitter_port] | -7,093,969,075,329,482,000 | Capture images continuously from the camera as an infinite iterator.
This method returns an infinite iterator of images captured
continuously from the camera. If *output* is a string, each captured
image is stored in a file named after *output* after substitution of
two values with the :meth:`~str.format` method. Those two values are:
* ``{counter}`` - a simple incrementor that starts at 1 and increases
by 1 for each image taken
* ``{timestamp}`` - a :class:`~datetime.datetime` instance
The table below contains several example values of *output* and the
sequence of filenames those values could produce:
.. tabularcolumns:: |p{80mm}|p{40mm}|p{10mm}|
+--------------------------------------------+--------------------------------------------+-------+
| *output* Value | Filenames | Notes |
+============================================+============================================+=======+
| ``'image{counter}.jpg'`` | image1.jpg, image2.jpg, image3.jpg, ... | |
+--------------------------------------------+--------------------------------------------+-------+
| ``'image{counter:02d}.jpg'`` | image01.jpg, image02.jpg, image03.jpg, ... | |
+--------------------------------------------+--------------------------------------------+-------+
| ``'image{timestamp}.jpg'`` | image2013-10-05 12:07:12.346743.jpg, | (1) |
| | image2013-10-05 12:07:32.498539, ... | |
+--------------------------------------------+--------------------------------------------+-------+
| ``'image{timestamp:%H-%M-%S-%f}.jpg'`` | image12-10-02-561527.jpg, | |
| | image12-10-14-905398.jpg | |
+--------------------------------------------+--------------------------------------------+-------+
| ``'{timestamp:%H%M%S}-{counter:03d}.jpg'`` | 121002-001.jpg, 121013-002.jpg, | (2) |
| | 121014-003.jpg, ... | |
+--------------------------------------------+--------------------------------------------+-------+
1. Note that because timestamp's default output includes colons (:),
the resulting filenames are not suitable for use on Windows. For
this reason (and the fact the default contains spaces) it is
strongly recommended you always specify a format when using
``{timestamp}``.
2. You can use both ``{timestamp}`` and ``{counter}`` in a single
format string (multiple times too!) although this tends to be
redundant.
If *output* is not a string, but has a ``write`` method, it is assumed
to be a file-like object and each image is simply written to this
object sequentially. In this case you will likely either want to write
something to the object between the images to distinguish them, or
clear the object between iterations. If *output* is not a string, and
has no ``write`` method, it is assumed to be a writeable object
supporting the buffer protocol; each image is simply written to the
buffer sequentially.
The *format*, *use_video_port*, *splitter_port*, *resize*, and
*options* parameters are the same as in :meth:`capture`.
If *use_video_port* is ``False`` (the default), the *burst* parameter
can be used to make still port captures faster. Specifically, this
prevents the preview from switching resolutions between captures which
significantly speeds up consecutive captures from the still port. The
downside is that this mode currently has several bugs; the major
issue is that if captures are performed too quickly some frames will
come back severely underexposed. It is recommended that users avoid the
*burst* parameter unless they absolutely require it and are prepared to
work around such issues.
For example, to capture 60 images with a one second delay between them,
writing the output to a series of JPEG files named image01.jpg,
image02.jpg, etc. one could do the following::
import time
import picamera
with picamera.PiCamera() as camera:
camera.start_preview()
try:
for i, filename in enumerate(
camera.capture_continuous('image{counter:02d}.jpg')):
print(filename)
time.sleep(1)
if i == 59:
break
finally:
camera.stop_preview()
Alternatively, to capture JPEG frames as fast as possible into an
in-memory stream, performing some processing on each stream until
some condition is satisfied::
import io
import time
import picamera
with picamera.PiCamera() as camera:
stream = io.BytesIO()
for foo in camera.capture_continuous(stream, format='jpeg'):
# Truncate the stream to the current position (in case
# prior iterations output a longer image)
stream.truncate()
stream.seek(0)
if process(stream):
break
.. versionchanged:: 1.0
The *resize* parameter was added, and raw capture formats can now
be specified directly
.. versionchanged:: 1.3
The *splitter_port* parameter was added
.. versionchanged:: 1.11
Support for buffer outputs was added. | picamera/camera.py | capture_continuous | RobertLucian/picamera | python | def capture_continuous(self, output, format=None, use_video_port=False, resize=None, splitter_port=0, burst=False, bayer=False, **options):
"\n Capture images continuously from the camera as an infinite iterator.\n\n This method returns an infinite iterator of images captured\n continuously from the camera. If *output* is a string, each captured\n image is stored in a file named after *output* after substitution of\n two values with the :meth:`~str.format` method. Those two values are:\n\n * ``{counter}`` - a simple incrementor that starts at 1 and increases\n by 1 for each image taken\n\n * ``{timestamp}`` - a :class:`~datetime.datetime` instance\n\n The table below contains several example values of *output* and the\n sequence of filenames those values could produce:\n\n .. tabularcolumns:: |p{80mm}|p{40mm}|p{10mm}|\n\n +--------------------------------------------+--------------------------------------------+-------+\n | *output* Value | Filenames | Notes |\n +============================================+============================================+=======+\n | ``'image{counter}.jpg'`` | image1.jpg, image2.jpg, image3.jpg, ... | |\n +--------------------------------------------+--------------------------------------------+-------+\n | ``'image{counter:02d}.jpg'`` | image01.jpg, image02.jpg, image03.jpg, ... | |\n +--------------------------------------------+--------------------------------------------+-------+\n | ``'image{timestamp}.jpg'`` | image2013-10-05 12:07:12.346743.jpg, | (1) |\n | | image2013-10-05 12:07:32.498539, ... | |\n +--------------------------------------------+--------------------------------------------+-------+\n | ``'image{timestamp:%H-%M-%S-%f}.jpg'`` | image12-10-02-561527.jpg, | |\n | | image12-10-14-905398.jpg | |\n +--------------------------------------------+--------------------------------------------+-------+\n | ``'{timestamp:%H%M%S}-{counter:03d}.jpg'`` | 121002-001.jpg, 121013-002.jpg, | (2) |\n | | 121014-003.jpg, ... | |\n +--------------------------------------------+--------------------------------------------+-------+\n\n 1. Note that because timestamp's default output includes colons (:),\n the resulting filenames are not suitable for use on Windows. For\n this reason (and the fact the default contains spaces) it is\n strongly recommended you always specify a format when using\n ``{timestamp}``.\n\n 2. You can use both ``{timestamp}`` and ``{counter}`` in a single\n format string (multiple times too!) although this tends to be\n redundant.\n\n If *output* is not a string, but has a ``write`` method, it is assumed\n to be a file-like object and each image is simply written to this\n object sequentially. In this case you will likely either want to write\n something to the object between the images to distinguish them, or\n clear the object between iterations. If *output* is not a string, and\n has no ``write`` method, it is assumed to be a writeable object\n supporting the buffer protocol; each image is simply written to the\n buffer sequentially.\n\n The *format*, *use_video_port*, *splitter_port*, *resize*, and\n *options* parameters are the same as in :meth:`capture`.\n\n If *use_video_port* is ``False`` (the default), the *burst* parameter\n can be used to make still port captures faster. Specifically, this\n prevents the preview from switching resolutions between captures which\n significantly speeds up consecutive captures from the still port. The\n downside is that this mode is currently has several bugs; the major\n issue is that if captures are performed too quickly some frames will\n come back severely underexposed. 
It is recommended that users avoid the\n *burst* parameter unless they absolutely require it and are prepared to\n work around such issues.\n\n For example, to capture 60 images with a one second delay between them,\n writing the output to a series of JPEG files named image01.jpg,\n image02.jpg, etc. one could do the following::\n\n import time\n import picamera\n with picamera.PiCamera() as camera:\n camera.start_preview()\n try:\n for i, filename in enumerate(\n camera.capture_continuous('image{counter:02d}.jpg')):\n print(filename)\n time.sleep(1)\n if i == 59:\n break\n finally:\n camera.stop_preview()\n\n Alternatively, to capture JPEG frames as fast as possible into an\n in-memory stream, performing some processing on each stream until\n some condition is satisfied::\n\n import io\n import time\n import picamera\n with picamera.PiCamera() as camera:\n stream = io.BytesIO()\n for foo in camera.capture_continuous(stream, format='jpeg'):\n # Truncate the stream to the current position (in case\n # prior iterations output a longer image)\n stream.truncate()\n stream.seek(0)\n if process(stream):\n break\n\n .. versionchanged:: 1.0\n The *resize* parameter was added, and raw capture formats can now\n be specified directly\n\n .. versionchanged:: 1.3\n The *splitter_port* parameter was added\n\n .. versionchanged:: 1.11\n Support for buffer outputs was added.\n "
if use_video_port:
if burst:
raise PiCameraValueError('burst is only valid with still port captures')
if bayer:
raise PiCameraValueError('bayer is only valid with still port captures')
with self._encoders_lock:
(camera_port, output_port) = self._get_ports(use_video_port, splitter_port)
format = self._get_image_format(output, format)
encoder = self._get_image_encoder(camera_port, output_port, format, resize, **options)
if use_video_port:
self._encoders[splitter_port] = encoder
try:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = True
try:
if isinstance(output, bytes):
output = output.decode('utf-8')
if isinstance(output, str):
counter = 1
while True:
filename = output.format(counter=counter, timestamp=datetime.datetime.now())
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(filename)
if (not encoder.wait(self.CAPTURE_TIMEOUT)):
raise PiCameraRuntimeError('Timed out waiting for capture to end')
(yield filename)
counter += 1
else:
while True:
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(output)
if (not encoder.wait(self.CAPTURE_TIMEOUT)):
raise PiCameraRuntimeError('Timed out waiting for capture to end')
(yield output)
finally:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = False
finally:
encoder.close()
with self._encoders_lock:
if use_video_port:
del self._encoders[splitter_port] |
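The row above is picamera's capture_continuous generator. As a usage sketch only (it assumes the picamera package and an attached camera module; the filename pattern and the capture count are illustrative), a Windows-safe variant of the timestamp example would be::

    import time
    import picamera

    with picamera.PiCamera() as camera:
        camera.start_preview()
        try:
            time.sleep(2)  # let the sensor settle before the first capture
            # No colons in the timestamp, so the filenames are valid on Windows too.
            for i, filename in enumerate(
                    camera.capture_continuous('{timestamp:%Y%m%d-%H%M%S}-{counter:03d}.jpg')):
                print(filename)
                time.sleep(1)
                if i == 9:
                    break
        finally:
            camera.stop_preview()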
@property
def closed(self):
'\n Returns ``True`` if the :meth:`close` method has been called.\n '
return (not self._camera) | 3,284,690,800,250,654,000 | Returns ``True`` if the :meth:`close` method has been called. | picamera/camera.py | closed | RobertLucian/picamera | python | @property
def closed(self):
'\n \n '
return (not self._camera) |
@property
def recording(self):
'\n Returns ``True`` if the :meth:`start_recording` method has been called,\n and no :meth:`stop_recording` call has been made yet.\n '
return any(((isinstance(e, PiVideoEncoder) and e.active) for e in self._encoders.values())) | -7,456,034,436,530,100,000 | Returns ``True`` if the :meth:`start_recording` method has been called,
and no :meth:`stop_recording` call has been made yet. | picamera/camera.py | recording | RobertLucian/picamera | python | @property
def recording(self):
'\n Returns ``True`` if the :meth:`start_recording` method has been called,\n and no :meth:`stop_recording` call has been made yet.\n '
return any(((isinstance(e, PiVideoEncoder) and e.active) for e in self._encoders.values())) |
@property
def previewing(self):
'\n Returns ``True`` if the :meth:`start_preview` method has been called,\n and no :meth:`stop_preview` call has been made yet.\n\n .. deprecated:: 1.8\n Test whether :attr:`preview` is ``None`` instead.\n '
warnings.warn(PiCameraDeprecated('PiCamera.previewing is deprecated; test PiCamera.preview is not None instead'))
return isinstance(self._preview, PiPreviewRenderer) | 1,968,703,755,299,582,700 | Returns ``True`` if the :meth:`start_preview` method has been called,
and no :meth:`stop_preview` call has been made yet.
.. deprecated:: 1.8
Test whether :attr:`preview` is ``None`` instead. | picamera/camera.py | previewing | RobertLucian/picamera | python | @property
def previewing(self):
'\n Returns ``True`` if the :meth:`start_preview` method has been called,\n and no :meth:`stop_preview` call has been made yet.\n\n .. deprecated:: 1.8\n Test whether :attr:`preview` is ``None`` instead.\n '
warnings.warn(PiCameraDeprecated('PiCamera.previewing is deprecated; test PiCamera.preview is not None instead'))
return isinstance(self._preview, PiPreviewRenderer) |
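Because previewing is deprecated, the docstring above recommends testing the preview attribute instead. A minimal sketch of that check (assuming picamera 1.8 or later)::

    import picamera

    with picamera.PiCamera() as camera:
        camera.start_preview()
        # Preferred over the deprecated camera.previewing property:
        if camera.preview is not None:
            print('preview renderer is running')
        camera.stop_preview()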
@property
def revision(self):
"\n Returns a string representing the revision of the Pi's camera module.\n At the time of writing, the string returned is 'ov5647' for the V1\n module, and 'imx219' for the V2 module.\n "
return self._revision | -4,425,374,092,435,434,000 | Returns a string representing the revision of the Pi's camera module.
At the time of writing, the string returned is 'ov5647' for the V1
module, and 'imx219' for the V2 module. | picamera/camera.py | revision | RobertLucian/picamera | python | @property
def revision(self):
"\n Returns a string representing the revision of the Pi's camera module.\n At the time of writing, the string returned is 'ov5647' for the V1\n module, and 'imx219' for the V2 module.\n "
return self._revision |
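A short sketch of branching on the reported sensor revision. The two revision strings come from the docstring above; the maximum resolutions (2592x1944 for the V1 module, 3280x2464 for the V2 module) are stated here as an assumption rather than taken from this source::

    import picamera

    with picamera.PiCamera() as camera:
        if camera.revision == 'imx219':      # V2 module
            camera.resolution = (3280, 2464)
        elif camera.revision == 'ov5647':    # V1 module
            camera.resolution = (2592, 1944)
        print('sensor revision:', camera.revision)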
@property
def exif_tags(self):
"\n Holds a mapping of the Exif tags to apply to captured images.\n\n .. note::\n\n Please note that Exif tagging is only supported with the ``jpeg``\n format.\n\n By default several Exif tags are automatically applied to any images\n taken with the :meth:`capture` method: ``IFD0.Make`` (which is set to\n ``RaspberryPi``), ``IFD0.Model`` (which is set to ``RP_OV5647``), and\n three timestamp tags: ``IFD0.DateTime``, ``EXIF.DateTimeOriginal``, and\n ``EXIF.DateTimeDigitized`` which are all set to the current date and\n time just before the picture is taken.\n\n If you wish to set additional Exif tags, or override any of the\n aforementioned tags, simply add entries to the exif_tags map before\n calling :meth:`capture`. For example::\n\n camera.exif_tags['IFD0.Copyright'] = 'Copyright (c) 2013 Foo Industries'\n\n The Exif standard mandates ASCII encoding for all textual values, hence\n strings containing non-ASCII characters will cause an encoding error to\n be raised when :meth:`capture` is called. If you wish to set binary\n values, use a :func:`bytes` value::\n\n camera.exif_tags['EXIF.UserComment'] = b'Something containing\\x00NULL characters'\n\n .. warning::\n\n Binary Exif values are currently ignored; this appears to be a\n libmmal or firmware bug.\n\n You may also specify datetime values, integer, or float values, all of\n which will be converted to appropriate ASCII strings (datetime values\n are formatted as ``YYYY:MM:DD HH:MM:SS`` in accordance with the Exif\n standard).\n\n The currently supported Exif tags are:\n\n +-------+-------------------------------------------------------------+\n | Group | Tags |\n +=======+=============================================================+\n | IFD0, | ImageWidth, ImageLength, BitsPerSample, Compression, |\n | IFD1 | PhotometricInterpretation, ImageDescription, Make, Model, |\n | | StripOffsets, Orientation, SamplesPerPixel, RowsPerString, |\n | | StripByteCounts, Xresolution, Yresolution, |\n | | PlanarConfiguration, ResolutionUnit, TransferFunction, |\n | | Software, DateTime, Artist, WhitePoint, |\n | | PrimaryChromaticities, JPEGInterchangeFormat, |\n | | JPEGInterchangeFormatLength, YcbCrCoefficients, |\n | | YcbCrSubSampling, YcbCrPositioning, ReferenceBlackWhite, |\n | | Copyright |\n +-------+-------------------------------------------------------------+\n | EXIF | ExposureTime, FNumber, ExposureProgram, |\n | | SpectralSensitivity, ISOSpeedRatings, OECF, ExifVersion, |\n | | DateTimeOriginal, DateTimeDigitized, |\n | | ComponentsConfiguration, CompressedBitsPerPixel, |\n | | ShutterSpeedValue, ApertureValue, BrightnessValue, |\n | | ExposureBiasValue, MaxApertureValue, SubjectDistance, |\n | | MeteringMode, LightSource, Flash, FocalLength, SubjectArea, |\n | | MakerNote, UserComment, SubSecTime, SubSecTimeOriginal, |\n | | SubSecTimeDigitized, FlashpixVersion, ColorSpace, |\n | | PixelXDimension, PixelYDimension, RelatedSoundFile, |\n | | FlashEnergy, SpacialFrequencyResponse, |\n | | FocalPlaneXResolution, FocalPlaneYResolution, |\n | | FocalPlaneResolutionUnit, SubjectLocation, ExposureIndex, |\n | | SensingMethod, FileSource, SceneType, CFAPattern, |\n | | CustomRendered, ExposureMode, WhiteBalance, |\n | | DigitalZoomRatio, FocalLengthIn35mmFilm, SceneCaptureType, |\n | | GainControl, Contrast, Saturation, Sharpness, |\n | | DeviceSettingDescription, SubjectDistanceRange, |\n | | ImageUniqueID |\n +-------+-------------------------------------------------------------+\n | GPS | GPSVersionID, GPSLatitudeRef, GPSLatitude, 
GPSLongitudeRef, |\n | | GPSLongitude, GPSAltitudeRef, GPSAltitude, GPSTimeStamp, |\n | | GPSSatellites, GPSStatus, GPSMeasureMode, GPSDOP, |\n | | GPSSpeedRef, GPSSpeed, GPSTrackRef, GPSTrack, |\n | | GPSImgDirectionRef, GPSImgDirection, GPSMapDatum, |\n | | GPSDestLatitudeRef, GPSDestLatitude, GPSDestLongitudeRef, |\n | | GPSDestLongitude, GPSDestBearingRef, GPSDestBearing, |\n | | GPSDestDistanceRef, GPSDestDistance, GPSProcessingMethod, |\n | | GPSAreaInformation, GPSDateStamp, GPSDifferential |\n +-------+-------------------------------------------------------------+\n | EINT | InteroperabilityIndex, InteroperabilityVersion, |\n | | RelatedImageFileFormat, RelatedImageWidth, |\n | | RelatedImageLength |\n +-------+-------------------------------------------------------------+\n "
return self._exif_tags | -2,373,883,298,329,916,400 | Holds a mapping of the Exif tags to apply to captured images.
.. note::
Please note that Exif tagging is only supported with the ``jpeg``
format.
By default several Exif tags are automatically applied to any images
taken with the :meth:`capture` method: ``IFD0.Make`` (which is set to
``RaspberryPi``), ``IFD0.Model`` (which is set to ``RP_OV5647``), and
three timestamp tags: ``IFD0.DateTime``, ``EXIF.DateTimeOriginal``, and
``EXIF.DateTimeDigitized`` which are all set to the current date and
time just before the picture is taken.
If you wish to set additional Exif tags, or override any of the
aforementioned tags, simply add entries to the exif_tags map before
calling :meth:`capture`. For example::
camera.exif_tags['IFD0.Copyright'] = 'Copyright (c) 2013 Foo Industries'
The Exif standard mandates ASCII encoding for all textual values, hence
strings containing non-ASCII characters will cause an encoding error to
be raised when :meth:`capture` is called. If you wish to set binary
values, use a :func:`bytes` value::
camera.exif_tags['EXIF.UserComment'] = b'Something containing\x00NULL characters'
.. warning::
Binary Exif values are currently ignored; this appears to be a
libmmal or firmware bug.
You may also specify datetime values, integer, or float values, all of
which will be converted to appropriate ASCII strings (datetime values
are formatted as ``YYYY:MM:DD HH:MM:SS`` in accordance with the Exif
standard).
The currently supported Exif tags are:
+-------+-------------------------------------------------------------+
| Group | Tags |
+=======+=============================================================+
| IFD0, | ImageWidth, ImageLength, BitsPerSample, Compression, |
| IFD1 | PhotometricInterpretation, ImageDescription, Make, Model, |
| | StripOffsets, Orientation, SamplesPerPixel, RowsPerString, |
| | StripByteCounts, Xresolution, Yresolution, |
| | PlanarConfiguration, ResolutionUnit, TransferFunction, |
| | Software, DateTime, Artist, WhitePoint, |
| | PrimaryChromaticities, JPEGInterchangeFormat, |
| | JPEGInterchangeFormatLength, YcbCrCoefficients, |
| | YcbCrSubSampling, YcbCrPositioning, ReferenceBlackWhite, |
| | Copyright |
+-------+-------------------------------------------------------------+
| EXIF | ExposureTime, FNumber, ExposureProgram, |
| | SpectralSensitivity, ISOSpeedRatings, OECF, ExifVersion, |
| | DateTimeOriginal, DateTimeDigitized, |
| | ComponentsConfiguration, CompressedBitsPerPixel, |
| | ShutterSpeedValue, ApertureValue, BrightnessValue, |
| | ExposureBiasValue, MaxApertureValue, SubjectDistance, |
| | MeteringMode, LightSource, Flash, FocalLength, SubjectArea, |
| | MakerNote, UserComment, SubSecTime, SubSecTimeOriginal, |
| | SubSecTimeDigitized, FlashpixVersion, ColorSpace, |
| | PixelXDimension, PixelYDimension, RelatedSoundFile, |
| | FlashEnergy, SpacialFrequencyResponse, |
| | FocalPlaneXResolution, FocalPlaneYResolution, |
| | FocalPlaneResolutionUnit, SubjectLocation, ExposureIndex, |
| | SensingMethod, FileSource, SceneType, CFAPattern, |
| | CustomRendered, ExposureMode, WhiteBalance, |
| | DigitalZoomRatio, FocalLengthIn35mmFilm, SceneCaptureType, |
| | GainControl, Contrast, Saturation, Sharpness, |
| | DeviceSettingDescription, SubjectDistanceRange, |
| | ImageUniqueID |
+-------+-------------------------------------------------------------+
| GPS | GPSVersionID, GPSLatitudeRef, GPSLatitude, GPSLongitudeRef, |
| | GPSLongitude, GPSAltitudeRef, GPSAltitude, GPSTimeStamp, |
| | GPSSatellites, GPSStatus, GPSMeasureMode, GPSDOP, |
| | GPSSpeedRef, GPSSpeed, GPSTrackRef, GPSTrack, |
| | GPSImgDirectionRef, GPSImgDirection, GPSMapDatum, |
| | GPSDestLatitudeRef, GPSDestLatitude, GPSDestLongitudeRef, |
| | GPSDestLongitude, GPSDestBearingRef, GPSDestBearing, |
| | GPSDestDistanceRef, GPSDestDistance, GPSProcessingMethod, |
| | GPSAreaInformation, GPSDateStamp, GPSDifferential |
+-------+-------------------------------------------------------------+
| EINT | InteroperabilityIndex, InteroperabilityVersion, |
| | RelatedImageFileFormat, RelatedImageWidth, |
| | RelatedImageLength |
+-------+-------------------------------------------------------------+ | picamera/camera.py | exif_tags | RobertLucian/picamera | python | @property
def exif_tags(self):
"\n Holds a mapping of the Exif tags to apply to captured images.\n\n .. note::\n\n Please note that Exif tagging is only supported with the ``jpeg``\n format.\n\n By default several Exif tags are automatically applied to any images\n taken with the :meth:`capture` method: ``IFD0.Make`` (which is set to\n ``RaspberryPi``), ``IFD0.Model`` (which is set to ``RP_OV5647``), and\n three timestamp tags: ``IFD0.DateTime``, ``EXIF.DateTimeOriginal``, and\n ``EXIF.DateTimeDigitized`` which are all set to the current date and\n time just before the picture is taken.\n\n If you wish to set additional Exif tags, or override any of the\n aforementioned tags, simply add entries to the exif_tags map before\n calling :meth:`capture`. For example::\n\n camera.exif_tags['IFD0.Copyright'] = 'Copyright (c) 2013 Foo Industries'\n\n The Exif standard mandates ASCII encoding for all textual values, hence\n strings containing non-ASCII characters will cause an encoding error to\n be raised when :meth:`capture` is called. If you wish to set binary\n values, use a :func:`bytes` value::\n\n camera.exif_tags['EXIF.UserComment'] = b'Something containing\\x00NULL characters'\n\n .. warning::\n\n Binary Exif values are currently ignored; this appears to be a\n libmmal or firmware bug.\n\n You may also specify datetime values, integer, or float values, all of\n which will be converted to appropriate ASCII strings (datetime values\n are formatted as ``YYYY:MM:DD HH:MM:SS`` in accordance with the Exif\n standard).\n\n The currently supported Exif tags are:\n\n +-------+-------------------------------------------------------------+\n | Group | Tags |\n +=======+=============================================================+\n | IFD0, | ImageWidth, ImageLength, BitsPerSample, Compression, |\n | IFD1 | PhotometricInterpretation, ImageDescription, Make, Model, |\n | | StripOffsets, Orientation, SamplesPerPixel, RowsPerString, |\n | | StripByteCounts, Xresolution, Yresolution, |\n | | PlanarConfiguration, ResolutionUnit, TransferFunction, |\n | | Software, DateTime, Artist, WhitePoint, |\n | | PrimaryChromaticities, JPEGInterchangeFormat, |\n | | JPEGInterchangeFormatLength, YcbCrCoefficients, |\n | | YcbCrSubSampling, YcbCrPositioning, ReferenceBlackWhite, |\n | | Copyright |\n +-------+-------------------------------------------------------------+\n | EXIF | ExposureTime, FNumber, ExposureProgram, |\n | | SpectralSensitivity, ISOSpeedRatings, OECF, ExifVersion, |\n | | DateTimeOriginal, DateTimeDigitized, |\n | | ComponentsConfiguration, CompressedBitsPerPixel, |\n | | ShutterSpeedValue, ApertureValue, BrightnessValue, |\n | | ExposureBiasValue, MaxApertureValue, SubjectDistance, |\n | | MeteringMode, LightSource, Flash, FocalLength, SubjectArea, |\n | | MakerNote, UserComment, SubSecTime, SubSecTimeOriginal, |\n | | SubSecTimeDigitized, FlashpixVersion, ColorSpace, |\n | | PixelXDimension, PixelYDimension, RelatedSoundFile, |\n | | FlashEnergy, SpacialFrequencyResponse, |\n | | FocalPlaneXResolution, FocalPlaneYResolution, |\n | | FocalPlaneResolutionUnit, SubjectLocation, ExposureIndex, |\n | | SensingMethod, FileSource, SceneType, CFAPattern, |\n | | CustomRendered, ExposureMode, WhiteBalance, |\n | | DigitalZoomRatio, FocalLengthIn35mmFilm, SceneCaptureType, |\n | | GainControl, Contrast, Saturation, Sharpness, |\n | | DeviceSettingDescription, SubjectDistanceRange, |\n | | ImageUniqueID |\n +-------+-------------------------------------------------------------+\n | GPS | GPSVersionID, GPSLatitudeRef, GPSLatitude, 
GPSLongitudeRef, |\n | | GPSLongitude, GPSAltitudeRef, GPSAltitude, GPSTimeStamp, |\n | | GPSSatellites, GPSStatus, GPSMeasureMode, GPSDOP, |\n | | GPSSpeedRef, GPSSpeed, GPSTrackRef, GPSTrack, |\n | | GPSImgDirectionRef, GPSImgDirection, GPSMapDatum, |\n | | GPSDestLatitudeRef, GPSDestLatitude, GPSDestLongitudeRef, |\n | | GPSDestLongitude, GPSDestBearingRef, GPSDestBearing, |\n | | GPSDestDistanceRef, GPSDestDistance, GPSProcessingMethod, |\n | | GPSAreaInformation, GPSDateStamp, GPSDifferential |\n +-------+-------------------------------------------------------------+\n | EINT | InteroperabilityIndex, InteroperabilityVersion, |\n | | RelatedImageFileFormat, RelatedImageWidth, |\n | | RelatedImageLength |\n +-------+-------------------------------------------------------------+\n "
return self._exif_tags |
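A hedged sketch of tagging a still capture, using only tag names listed in the table above (the artist, copyright string, and filename are illustrative)::

    import picamera

    with picamera.PiCamera() as camera:
        camera.exif_tags['IFD0.Artist'] = 'Jane Doe'
        camera.exif_tags['IFD0.Copyright'] = 'Copyright (c) 2016 Jane Doe'
        camera.exif_tags['EXIF.UserComment'] = 'Test shot from the tagging example'
        # Exif tagging only applies to the jpeg format.
        camera.capture('tagged.jpg', format='jpeg')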
def _disable_camera(self):
'\n An internal method for disabling the camera, e.g. for re-configuration.\n This disables the splitter and preview connections (if they exist).\n '
self._splitter.connection.disable()
self._preview.renderer.connection.disable()
self._camera.disable() | 3,535,478,066,715,093,000 | An internal method for disabling the camera, e.g. for re-configuration.
This disables the splitter and preview connections (if they exist). | picamera/camera.py | _disable_camera | RobertLucian/picamera | python | def _disable_camera(self):
'\n An internal method for disabling the camera, e.g. for re-configuration.\n This disables the splitter and preview connections (if they exist).\n '
self._splitter.connection.disable()
self._preview.renderer.connection.disable()
self._camera.disable() |
def _enable_camera(self):
'\n An internal method for enabling the camera after re-configuration.\n This ensures the splitter configuration is consistent, then re-enables\n the camera along with the splitter and preview connections.\n '
self._camera.enable()
self._preview.renderer.connection.enable()
self._splitter.connection.enable() | 1,428,951,581,791,775,700 | An internal method for enabling the camera after re-configuration.
This ensures the splitter configuration is consistent, then re-enables
the camera along with the splitter and preview connections. | picamera/camera.py | _enable_camera | RobertLucian/picamera | python | def _enable_camera(self):
'\n An internal method for enabling the camera after re-configuration.\n This ensures the splitter configuration is consistent, then re-enables\n the camera along with the splitter and preview connections.\n '
self._camera.enable()
self._preview.renderer.connection.enable()
self._splitter.connection.enable() |
def _configure_splitter(self):
'\n Ensures all splitter output ports have a sensible format (I420) and\n buffer sizes.\n\n This method is used to ensure the splitter configuration is sane,\n typically after :meth:`_configure_camera` is called.\n '
self._splitter.inputs[0].copy_from(self._camera.outputs[self.CAMERA_VIDEO_PORT])
self._splitter.inputs[0].commit() | -1,595,100,311,936,897,000 | Ensures all splitter output ports have a sensible format (I420) and
buffer sizes.
This method is used to ensure the splitter configuration is sane,
typically after :meth:`_configure_camera` is called. | picamera/camera.py | _configure_splitter | RobertLucian/picamera | python | def _configure_splitter(self):
'\n Ensures all splitter output ports have a sensible format (I420) and\n buffer sizes.\n\n This method is used to ensure the splitter configuration is sane,\n typically after :meth:`_configure_camera` is called.\n '
self._splitter.inputs[0].copy_from(self._camera.outputs[self.CAMERA_VIDEO_PORT])
self._splitter.inputs[0].commit() |
def _configure_camera(self, sensor_mode, framerate, resolution, clock_mode, old_sensor_mode=0):
"\n An internal method for setting a new camera mode, framerate,\n resolution, and/or clock_mode.\n\n This method is used by the setters of the :attr:`resolution`,\n :attr:`framerate`, and :attr:`sensor_mode` properties. It assumes the\n camera is currently disabled. The *old_mode* and *new_mode* arguments\n are required to ensure correct operation on older firmwares\n (specifically that we don't try to set the sensor mode when both old\n and new modes are 0 or automatic).\n "
old_cc = mmal.MMAL_PARAMETER_CAMERA_CONFIG_T.from_buffer_copy(self._camera_config)
old_ports = [(port.framesize, port.framerate, port.params[mmal.MMAL_PARAMETER_FPS_RANGE]) for port in self._camera.outputs]
if ((old_sensor_mode != 0) or (sensor_mode != 0)):
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CUSTOM_SENSOR_CONFIG] = sensor_mode
if (not self._camera.control.enabled):
self._camera.control.enable(self._control_callback)
preview_resolution = resolution
elif (self._camera.outputs[self.CAMERA_PREVIEW_PORT].framesize == self._camera.outputs[self.CAMERA_VIDEO_PORT].framesize):
preview_resolution = resolution
else:
preview_resolution = self._camera.outputs[self.CAMERA_PREVIEW_PORT].framesize
try:
try:
(fps_low, fps_high) = framerate
except TypeError:
fps_low = fps_high = framerate
else:
framerate = 0
fps_range = mmal.MMAL_PARAMETER_FPS_RANGE_T(mmal.MMAL_PARAMETER_HEADER_T(mmal.MMAL_PARAMETER_FPS_RANGE, ct.sizeof(mmal.MMAL_PARAMETER_FPS_RANGE_T)), fps_low=mo.to_rational(fps_low), fps_high=mo.to_rational(fps_high))
cc = self._camera_config
cc.max_stills_w = resolution.width
cc.max_stills_h = resolution.height
cc.stills_yuv422 = 0
cc.one_shot_stills = 1
cc.max_preview_video_w = resolution.width
cc.max_preview_video_h = resolution.height
cc.num_preview_video_frames = max(3, (fps_high // 10))
cc.stills_capture_circular_buffer_height = 0
cc.fast_preview_resume = 0
cc.use_stc_timestamp = clock_mode
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CONFIG] = cc
if ((preview_resolution.width > resolution.width) or (preview_resolution.height > resolution.height)):
preview_resolution = resolution
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_FPS_RANGE] = fps_range
if (port.index == self.CAMERA_PREVIEW_PORT):
port.framesize = preview_resolution
else:
port.framesize = resolution
port.framerate = framerate
port.commit()
except:
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CONFIG] = old_cc
self._camera_config = old_cc
for (port, (res, fps, fps_range)) in zip(self._camera.outputs, old_ports):
port.framesize = res
port.framerate = fps
port.params[mmal.MMAL_PARAMETER_FPS_RANGE] = fps_range
port.commit()
raise | 863,730,291,302,044,500 | An internal method for setting a new camera mode, framerate,
resolution, and/or clock_mode.
This method is used by the setters of the :attr:`resolution`,
:attr:`framerate`, and :attr:`sensor_mode` properties. It assumes the
camera is currently disabled. The *old_mode* and *new_mode* arguments
are required to ensure correct operation on older firmwares
(specifically that we don't try to set the sensor mode when both old
and new modes are 0 or automatic). | picamera/camera.py | _configure_camera | RobertLucian/picamera | python | def _configure_camera(self, sensor_mode, framerate, resolution, clock_mode, old_sensor_mode=0):
"\n An internal method for setting a new camera mode, framerate,\n resolution, and/or clock_mode.\n\n This method is used by the setters of the :attr:`resolution`,\n :attr:`framerate`, and :attr:`sensor_mode` properties. It assumes the\n camera is currently disabled. The *old_mode* and *new_mode* arguments\n are required to ensure correct operation on older firmwares\n (specifically that we don't try to set the sensor mode when both old\n and new modes are 0 or automatic).\n "
old_cc = mmal.MMAL_PARAMETER_CAMERA_CONFIG_T.from_buffer_copy(self._camera_config)
old_ports = [(port.framesize, port.framerate, port.params[mmal.MMAL_PARAMETER_FPS_RANGE]) for port in self._camera.outputs]
if ((old_sensor_mode != 0) or (sensor_mode != 0)):
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CUSTOM_SENSOR_CONFIG] = sensor_mode
if (not self._camera.control.enabled):
self._camera.control.enable(self._control_callback)
preview_resolution = resolution
elif (self._camera.outputs[self.CAMERA_PREVIEW_PORT].framesize == self._camera.outputs[self.CAMERA_VIDEO_PORT].framesize):
preview_resolution = resolution
else:
preview_resolution = self._camera.outputs[self.CAMERA_PREVIEW_PORT].framesize
try:
try:
(fps_low, fps_high) = framerate
except TypeError:
fps_low = fps_high = framerate
else:
framerate = 0
fps_range = mmal.MMAL_PARAMETER_FPS_RANGE_T(mmal.MMAL_PARAMETER_HEADER_T(mmal.MMAL_PARAMETER_FPS_RANGE, ct.sizeof(mmal.MMAL_PARAMETER_FPS_RANGE_T)), fps_low=mo.to_rational(fps_low), fps_high=mo.to_rational(fps_high))
cc = self._camera_config
cc.max_stills_w = resolution.width
cc.max_stills_h = resolution.height
cc.stills_yuv422 = 0
cc.one_shot_stills = 1
cc.max_preview_video_w = resolution.width
cc.max_preview_video_h = resolution.height
cc.num_preview_video_frames = max(3, (fps_high // 10))
cc.stills_capture_circular_buffer_height = 0
cc.fast_preview_resume = 0
cc.use_stc_timestamp = clock_mode
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CONFIG] = cc
if ((preview_resolution.width > resolution.width) or (preview_resolution.height > resolution.height)):
preview_resolution = resolution
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_FPS_RANGE] = fps_range
if (port.index == self.CAMERA_PREVIEW_PORT):
port.framesize = preview_resolution
else:
port.framesize = resolution
port.framerate = framerate
port.commit()
except:
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CONFIG] = old_cc
self._camera_config = old_cc
for (port, (res, fps, fps_range)) in zip(self._camera.outputs, old_ports):
port.framesize = res
port.framerate = fps
port.params[mmal.MMAL_PARAMETER_FPS_RANGE] = fps_range
port.commit()
raise |
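_configure_camera is internal; user code reaches it by assigning the public resolution, framerate, and sensor_mode properties, whose setters disable the camera, reconfigure the ports, and re-enable it. A sketch of that public path (the particular values are illustrative)::

    import picamera

    with picamera.PiCamera() as camera:
        # Each assignment below ends up in _configure_camera() while the
        # camera component and its connections are temporarily disabled.
        camera.resolution = (1280, 720)
        camera.framerate = 30
        camera.sensor_mode = 5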
@logical_and.register('Number', 'Number')
def _logical_and_scala(x, y):
'\n Return logical and operation result of x and y.\n\n Args:\n x(Number): Number.\n y(Number): Number.\n\n Returns:\n bool, Return logical and operation result of x and y.\n '
return F.bool_and(x.__bool__(), y.__bool__()) | -7,286,355,318,795,096,000 | Return logical and operation result of x and y.
Args:
x(Number): Number.
y(Number): Number.
Returns:
bool, Return logical and operation result of x and y. | mindspore/ops/composite/multitype_ops/logical_and_impl.py | _logical_and_scala | Gavin-Hoang/mindspore | python | @logical_and.register('Number', 'Number')
def _logical_and_scala(x, y):
'\n Return logical and operation result of x and y.\n\n Args:\n x(Number): Number.\n y(Number): Number.\n\n Returns:\n bool, Return logical and operation result of x and y.\n '
return F.bool_and(x.__bool__(), y.__bool__()) |
@logical_and.register('Tensor', 'Tensor')
def _logical_and_tensor(x, y):
'\n Return logical and operation result of x and y.\n\n Args:\n x(Tensor): Tensor.\n y(Tensor): Tensor.\n\n Returns:\n Tensor, Return logical and operation result of x and y.\n '
return F.logical_and(x, y) | 4,841,098,281,168,699,000 | Return logical and operation result of x and y.
Args:
x(Tensor): Tensor.
y(Tensor): Tensor.
Returns:
Tensor, Return logical and operation result of x and y. | mindspore/ops/composite/multitype_ops/logical_and_impl.py | _logical_and_tensor | Gavin-Hoang/mindspore | python | @logical_and.register('Tensor', 'Tensor')
def _logical_and_tensor(x, y):
'\n Return logical and operation result of x and y.\n\n Args:\n x(Tensor): Tensor.\n y(Tensor): Tensor.\n\n Returns:\n Tensor, Return logical and operation result of x and y.\n '
return F.logical_and(x, y) |
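The two rows above register the Number/Number and Tensor/Tensor overloads of MindSpore's multitype logical_and. As a rough, framework-free analogue of this kind of type-based dispatch (using functools.singledispatch purely for illustration, not MindSpore's actual registry, with a plain list standing in for a tensor)::

    from functools import singledispatch

    @singledispatch
    def logical_and(x, y):
        raise TypeError('unsupported operand type: %r' % type(x))

    @logical_and.register(bool)
    @logical_and.register(int)
    @logical_and.register(float)
    def _logical_and_number(x, y):
        # Mirrors the Number/Number overload: boolean "and" of the operands.
        return bool(x) and bool(y)

    @logical_and.register(list)
    def _logical_and_list(x, y):
        # Stand-in for the Tensor/Tensor overload: element-wise logical and.
        return [bool(a) and bool(b) for a, b in zip(x, y)]

    print(logical_and(3, 0))             # False
    print(logical_and([1, 0], [1, 1]))   # [True, False]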
def test_create_file(self):
'Test the creation of a simple XlsxWriter file.'
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'stock'})
date_format = workbook.add_format({'num_format': 14})
chart.axis_ids = [45740032, 45747200]
data = [[39083, 39084, 39085, 39086, 39087], [27.2, 25.03, 19.05, 20.34, 18.5], [23.49, 19.55, 15.12, 17.84, 16.34], [25.45, 23.05, 17.32, 20.45, 17.34]]
for row in range(5):
worksheet.write(row, 0, data[0][row], date_format)
worksheet.write(row, 1, data[1][row])
worksheet.write(row, 2, data[2][row])
worksheet.write(row, 3, data[3][row])
worksheet.set_column('A:D', 11)
chart.add_series({'categories': '=Sheet1!$A$1:$A$5', 'values': '=Sheet1!$B$1:$B$5'})
chart.add_series({'categories': '=Sheet1!$A$1:$A$5', 'values': '=Sheet1!$C$1:$C$5'})
chart.add_series({'categories': '=Sheet1!$A$1:$A$5', 'values': '=Sheet1!$D$1:$D$5', 'data_labels': {'value': 1, 'position': 'right'}})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual() | -3,457,937,152,193,595,000 | Test the creation of a simple XlsxWriter file. | xlsxwriter/test/comparison/test_chart_data_labels17.py | test_create_file | hugovk/XlsxWriter | python | def test_create_file(self):
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'stock'})
date_format = workbook.add_format({'num_format': 14})
chart.axis_ids = [45740032, 45747200]
data = [[39083, 39084, 39085, 39086, 39087], [27.2, 25.03, 19.05, 20.34, 18.5], [23.49, 19.55, 15.12, 17.84, 16.34], [25.45, 23.05, 17.32, 20.45, 17.34]]
for row in range(5):
worksheet.write(row, 0, data[0][row], date_format)
worksheet.write(row, 1, data[1][row])
worksheet.write(row, 2, data[2][row])
worksheet.write(row, 3, data[3][row])
worksheet.set_column('A:D', 11)
chart.add_series({'categories': '=Sheet1!$A$1:$A$5', 'values': '=Sheet1!$B$1:$B$5'})
chart.add_series({'categories': '=Sheet1!$A$1:$A$5', 'values': '=Sheet1!$C$1:$C$5'})
chart.add_series({'categories': '=Sheet1!$A$1:$A$5', 'values': '=Sheet1!$D$1:$D$5', 'data_labels': {'value': 1, 'position': 'right'}})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual() |
def get_key(node):
'Generate a fresh key on node\n\n Returns a named tuple of privkey, pubkey and all address and scripts.'
addr = node.getnewaddress()
pubkey = node.getaddressinfo(addr)['pubkey']
return Key(privkey=node.dumpprivkey(addr), pubkey=pubkey, p2pkh_script=key_to_p2pkh_script(pubkey).hex(), p2pkh_addr=key_to_p2pkh(pubkey), p2wpkh_script=key_to_p2wpkh_script(pubkey).hex(), p2wpkh_addr=key_to_p2wpkh(pubkey), p2sh_p2wpkh_script=script_to_p2sh_script(key_to_p2wpkh_script(pubkey)).hex(), p2sh_p2wpkh_redeem_script=key_to_p2wpkh_script(pubkey).hex(), p2sh_p2wpkh_addr=key_to_p2sh_p2wpkh(pubkey)) | -563,035,140,004,875,460 | Generate a fresh key on node
Returns a named tuple of privkey, pubkey and all address and scripts. | test/functional/test_framework/wallet_util.py | get_key | ludirium/ludirium | python | def get_key(node):
'Generate a fresh key on node\n\n Returns a named tuple of privkey, pubkey and all address and scripts.'
addr = node.getnewaddress()
pubkey = node.getaddressinfo(addr)['pubkey']
return Key(privkey=node.dumpprivkey(addr), pubkey=pubkey, p2pkh_script=key_to_p2pkh_script(pubkey).hex(), p2pkh_addr=key_to_p2pkh(pubkey), p2wpkh_script=key_to_p2wpkh_script(pubkey).hex(), p2wpkh_addr=key_to_p2wpkh(pubkey), p2sh_p2wpkh_script=script_to_p2sh_script(key_to_p2wpkh_script(pubkey)).hex(), p2sh_p2wpkh_redeem_script=key_to_p2wpkh_script(pubkey).hex(), p2sh_p2wpkh_addr=key_to_p2sh_p2wpkh(pubkey)) |
def get_generate_key():
'Generate a fresh key\n\n Returns a named tuple of privkey, pubkey and all address and scripts.'
eckey = ECKey()
eckey.generate()
privkey = bytes_to_wif(eckey.get_bytes())
pubkey = eckey.get_pubkey().get_bytes().hex()
return Key(privkey=privkey, pubkey=pubkey, p2pkh_script=key_to_p2pkh_script(pubkey).hex(), p2pkh_addr=key_to_p2pkh(pubkey), p2wpkh_script=key_to_p2wpkh_script(pubkey).hex(), p2wpkh_addr=key_to_p2wpkh(pubkey), p2sh_p2wpkh_script=script_to_p2sh_script(key_to_p2wpkh_script(pubkey)).hex(), p2sh_p2wpkh_redeem_script=key_to_p2wpkh_script(pubkey).hex(), p2sh_p2wpkh_addr=key_to_p2sh_p2wpkh(pubkey)) | 787,008,133,391,978,900 | Generate a fresh key
Returns a named tuple of privkey, pubkey and all address and scripts. | test/functional/test_framework/wallet_util.py | get_generate_key | ludirium/ludirium | python | def get_generate_key():
'Generate a fresh key\n\n Returns a named tuple of privkey, pubkey and all address and scripts.'
eckey = ECKey()
eckey.generate()
privkey = bytes_to_wif(eckey.get_bytes())
pubkey = eckey.get_pubkey().get_bytes().hex()
return Key(privkey=privkey, pubkey=pubkey, p2pkh_script=key_to_p2pkh_script(pubkey).hex(), p2pkh_addr=key_to_p2pkh(pubkey), p2wpkh_script=key_to_p2wpkh_script(pubkey).hex(), p2wpkh_addr=key_to_p2wpkh(pubkey), p2sh_p2wpkh_script=script_to_p2sh_script(key_to_p2wpkh_script(pubkey)).hex(), p2sh_p2wpkh_redeem_script=key_to_p2wpkh_script(pubkey).hex(), p2sh_p2wpkh_addr=key_to_p2sh_p2wpkh(pubkey)) |
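get_key above needs a running node for getnewaddress/dumpprivkey, whereas get_generate_key derives everything offline with the framework's own ECKey. A usage sketch inside a functional test (the import path assumes the standard test_framework layout)::

    from test_framework.wallet_util import get_generate_key

    key = get_generate_key()
    print(key.privkey)           # WIF-encoded private key
    print(key.pubkey)            # hex-encoded public key
    print(key.p2pkh_addr)        # legacy (base58) address
    print(key.p2wpkh_addr)       # native segwit (bech32) address
    print(key.p2sh_p2wpkh_addr)  # P2SH-wrapped segwit address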
def get_multisig(node):
'Generate a fresh 2-of-3 multisig on node\n\n Returns a named tuple of privkeys, pubkeys and all address and scripts.'
addrs = []
pubkeys = []
for _ in range(3):
addr = node.getaddressinfo(node.getnewaddress())
addrs.append(addr['address'])
pubkeys.append(addr['pubkey'])
script_code = CScript((([OP_2] + [hex_str_to_bytes(pubkey) for pubkey in pubkeys]) + [OP_3, OP_CHECKMULTISIG]))
witness_script = script_to_p2wsh_script(script_code)
return Multisig(privkeys=[node.dumpprivkey(addr) for addr in addrs], pubkeys=pubkeys, p2sh_script=script_to_p2sh_script(script_code).hex(), p2sh_addr=script_to_p2sh(script_code), redeem_script=script_code.hex(), p2wsh_script=witness_script.hex(), p2wsh_addr=script_to_p2wsh(script_code), p2sh_p2wsh_script=script_to_p2sh_script(witness_script).hex(), p2sh_p2wsh_addr=script_to_p2sh_p2wsh(script_code)) | -5,366,923,151,664,502,000 | Generate a fresh 2-of-3 multisig on node
Returns a named tuple of privkeys, pubkeys and all address and scripts. | test/functional/test_framework/wallet_util.py | get_multisig | ludirium/ludirium | python | def get_multisig(node):
'Generate a fresh 2-of-3 multisig on node\n\n Returns a named tuple of privkeys, pubkeys and all address and scripts.'
addrs = []
pubkeys = []
for _ in range(3):
addr = node.getaddressinfo(node.getnewaddress())
addrs.append(addr['address'])
pubkeys.append(addr['pubkey'])
script_code = CScript((([OP_2] + [hex_str_to_bytes(pubkey) for pubkey in pubkeys]) + [OP_3, OP_CHECKMULTISIG]))
witness_script = script_to_p2wsh_script(script_code)
return Multisig(privkeys=[node.dumpprivkey(addr) for addr in addrs], pubkeys=pubkeys, p2sh_script=script_to_p2sh_script(script_code).hex(), p2sh_addr=script_to_p2sh(script_code), redeem_script=script_code.hex(), p2wsh_script=witness_script.hex(), p2wsh_addr=script_to_p2wsh(script_code), p2sh_p2wsh_script=script_to_p2sh_script(witness_script).hex(), p2sh_p2wsh_addr=script_to_p2sh_p2wsh(script_code)) |
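A hedged sketch of using the returned Multisig tuple inside a functional test (node is assumed to be one of the test's running nodes)::

    ms = get_multisig(node)

    # Any two of the three private keys can sign for the 2-of-3 script.
    signing_keys = ms.privkeys[:2]

    print(ms.p2sh_addr)         # legacy P2SH multisig address
    print(ms.p2wsh_addr)        # native segwit (P2WSH) address
    print(ms.p2sh_p2wsh_addr)   # P2SH-wrapped P2WSH address
    print(ms.redeem_script)     # hex of the 2-of-3 CHECKMULTISIG script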
def test_address(node, address, **kwargs):
'Get address info for `address` and test whether the returned values are as expected.'
addr_info = node.getaddressinfo(address)
for (key, value) in kwargs.items():
if (value is None):
if (key in addr_info.keys()):
raise AssertionError('key {} unexpectedly returned in getaddressinfo.'.format(key))
elif (addr_info[key] != value):
raise AssertionError('key {} value {} did not match expected value {}'.format(key, addr_info[key], value)) | 2,198,220,858,924,984,800 | Get address info for `address` and test whether the returned values are as expected. | test/functional/test_framework/wallet_util.py | test_address | ludirium/ludirium | python | def test_address(node, address, **kwargs):
addr_info = node.getaddressinfo(address)
for (key, value) in kwargs.items():
if (value is None):
if (key in addr_info.keys()):
raise AssertionError('key {} unexpectedly returned in getaddressinfo.'.format(key))
elif (addr_info[key] != value):
raise AssertionError('key {} value {} did not match expected value {}'.format(key, addr_info[key], value)) |
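A usage sketch for test_address, again assuming a running test node; the two keyword names are standard getaddressinfo fields::

    addr = node.getnewaddress()

    # Each keyword is compared against the matching getaddressinfo field;
    # passing None for a keyword instead asserts that the field is absent.
    test_address(node, addr, ismine=True, solvable=True)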
def read_global_config(path: Text) -> Dict[(Text, Any)]:
'Read global Rasa configuration.\n\n Args:\n path: Path to the configuration\n Returns:\n The global configuration\n '
try:
return rasa.shared.utils.io.read_config_file(path)
except Exception:
return {} | 8,840,443,311,206,454,000 | Read global Rasa configuration.
Args:
path: Path to the configuration
Returns:
The global configuration | rasa/utils/common.py | read_global_config | karen-white/rasa | python | def read_global_config(path: Text) -> Dict[(Text, Any)]:
'Read global Rasa configuration.\n\n Args:\n path: Path to the configuration\n Returns:\n The global configuration\n '
try:
return rasa.shared.utils.io.read_config_file(path)
except Exception:
return {} |
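A small usage sketch for read_global_config; the path and the configuration keys below are illustrative assumptions, not taken from this source::

    import os

    config_path = os.path.expanduser('~/.config/rasa/global.yml')
    config = read_global_config(config_path)

    # The helper swallows read/parse errors and returns {}, so callers can
    # use .get() without special-casing a missing or broken file.
    telemetry = config.get('metrics', {}).get('enabled', False)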
def set_log_level(log_level: Optional[int]=None):
"Set log level of Rasa and Tensorflow either to the provided log level or\n to the log level specified in the environment variable 'LOG_LEVEL'. If none is set\n a default log level will be used."
if (not log_level):
log_level = os.environ.get(ENV_LOG_LEVEL, DEFAULT_LOG_LEVEL)
log_level = logging.getLevelName(log_level)
logging.getLogger('rasa').setLevel(log_level)
update_tensorflow_log_level()
update_asyncio_log_level()
update_apscheduler_log_level()
update_socketio_log_level()
os.environ[ENV_LOG_LEVEL] = logging.getLevelName(log_level) | 7,298,357,630,811,053,000 | Set log level of Rasa and Tensorflow either to the provided log level or
to the log level specified in the environment variable 'LOG_LEVEL'. If none is set
a default log level will be used. | rasa/utils/common.py | set_log_level | karen-white/rasa | python | def set_log_level(log_level: Optional[int]=None):
"Set log level of Rasa and Tensorflow either to the provided log level or\n to the log level specified in the environment variable 'LOG_LEVEL'. If none is set\n a default log level will be used."
if (not log_level):
log_level = os.environ.get(ENV_LOG_LEVEL, DEFAULT_LOG_LEVEL)
log_level = logging.getLevelName(log_level)
logging.getLogger('rasa').setLevel(log_level)
update_tensorflow_log_level()
update_asyncio_log_level()
update_apscheduler_log_level()
update_socketio_log_level()
os.environ[ENV_LOG_LEVEL] = logging.getLevelName(log_level) |
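Two hedged ways to drive set_log_level, matching the docstring above (ENV_LOG_LEVEL resolves to the LOG_LEVEL environment variable)::

    import logging
    import os

    # Either export the environment variable before start-up ...
    os.environ['LOG_LEVEL'] = 'DEBUG'
    set_log_level()

    # ... or pass a numeric level directly; both update the 'rasa' logger and
    # the library loggers (TensorFlow, asyncio, APScheduler, socketio).
    set_log_level(logging.WARNING)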
def update_tensorflow_log_level() -> None:
"Set the log level of Tensorflow to the log level specified in the environment\n variable 'LOG_LEVEL_LIBRARIES'."
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
log_level = os.environ.get(ENV_LOG_LEVEL_LIBRARIES, DEFAULT_LOG_LEVEL_LIBRARIES)
if (log_level == 'DEBUG'):
tf_log_level = tf.compat.v1.logging.DEBUG
elif (log_level == 'INFO'):
tf_log_level = tf.compat.v1.logging.INFO
elif (log_level == 'WARNING'):
tf_log_level = tf.compat.v1.logging.WARN
else:
tf_log_level = tf.compat.v1.logging.ERROR
tf.compat.v1.logging.set_verbosity(tf_log_level)
logging.getLogger('tensorflow').propagate = False | -6,638,952,293,186,640,000 | Set the log level of Tensorflow to the log level specified in the environment
variable 'LOG_LEVEL_LIBRARIES'. | rasa/utils/common.py | update_tensorflow_log_level | karen-white/rasa | python | def update_tensorflow_log_level() -> None:
"Set the log level of Tensorflow to the log level specified in the environment\n variable 'LOG_LEVEL_LIBRARIES'."
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
log_level = os.environ.get(ENV_LOG_LEVEL_LIBRARIES, DEFAULT_LOG_LEVEL_LIBRARIES)
if (log_level == 'DEBUG'):
tf_log_level = tf.compat.v1.logging.DEBUG
elif (log_level == 'INFO'):
tf_log_level = tf.compat.v1.logging.INFO
elif (log_level == 'WARNING'):
tf_log_level = tf.compat.v1.logging.WARN
else:
tf_log_level = tf.compat.v1.logging.ERROR
tf.compat.v1.logging.set_verbosity(tf_log_level)
logging.getLogger('tensorflow').propagate = False |
def update_sanic_log_level(log_file: Optional[Text]=None):
"Set the log level of sanic loggers to the log level specified in the environment\n variable 'LOG_LEVEL_LIBRARIES'."
from sanic.log import logger, error_logger, access_logger
log_level = os.environ.get(ENV_LOG_LEVEL_LIBRARIES, DEFAULT_LOG_LEVEL_LIBRARIES)
logger.setLevel(log_level)
error_logger.setLevel(log_level)
access_logger.setLevel(log_level)
logger.propagate = False
error_logger.propagate = False
access_logger.propagate = False
if (log_file is not None):
formatter = logging.Formatter('%(asctime)s [%(levelname)-5.5s] %(message)s')
file_handler = logging.FileHandler(log_file)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
error_logger.addHandler(file_handler)
access_logger.addHandler(file_handler) | -5,810,610,133,777,619,000 | Set the log level of sanic loggers to the log level specified in the environment
variable 'LOG_LEVEL_LIBRARIES'. | rasa/utils/common.py | update_sanic_log_level | karen-white/rasa | python | def update_sanic_log_level(log_file: Optional[Text]=None):
"Set the log level of sanic loggers to the log level specified in the environment\n variable 'LOG_LEVEL_LIBRARIES'."
from sanic.log import logger, error_logger, access_logger
log_level = os.environ.get(ENV_LOG_LEVEL_LIBRARIES, DEFAULT_LOG_LEVEL_LIBRARIES)
logger.setLevel(log_level)
error_logger.setLevel(log_level)
access_logger.setLevel(log_level)
logger.propagate = False
error_logger.propagate = False
access_logger.propagate = False
if (log_file is not None):
formatter = logging.Formatter('%(asctime)s [%(levelname)-5.5s] %(message)s')
file_handler = logging.FileHandler(log_file)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
error_logger.addHandler(file_handler)
access_logger.addHandler(file_handler) |
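A brief sketch of calling the sanic variant; the filename is illustrative::

    import os

    # Library log level comes from LOG_LEVEL_LIBRARIES; log_file additionally
    # mirrors sanic's logger, error_logger and access_logger to a file.
    os.environ['LOG_LEVEL_LIBRARIES'] = 'WARNING'
    update_sanic_log_level(log_file='rasa_server.log')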
def update_asyncio_log_level() -> None:
"Set the log level of asyncio to the log level specified in the environment\n variable 'LOG_LEVEL_LIBRARIES'."
log_level = os.environ.get(ENV_LOG_LEVEL_LIBRARIES, DEFAULT_LOG_LEVEL_LIBRARIES)
logging.getLogger('asyncio').setLevel(log_level) | -7,843,690,206,043,967,000 | Set the log level of asyncio to the log level specified in the environment
variable 'LOG_LEVEL_LIBRARIES'. | rasa/utils/common.py | update_asyncio_log_level | karen-white/rasa | python | def update_asyncio_log_level() -> None:
"Set the log level of asyncio to the log level specified in the environment\n variable 'LOG_LEVEL_LIBRARIES'."
log_level = os.environ.get(ENV_LOG_LEVEL_LIBRARIES, DEFAULT_LOG_LEVEL_LIBRARIES)
logging.getLogger('asyncio').setLevel(log_level) |
def set_log_and_warnings_filters() -> None:
'\n Set log filters on the root logger, and duplicate filters for warnings.\n\n Filters only propagate on handlers, not loggers.\n '
for handler in logging.getLogger().handlers:
handler.addFilter(RepeatedLogFilter())
warnings.filterwarnings('once', category=UserWarning) | 5,511,566,338,182,023,000 | Set log filters on the root logger, and duplicate filters for warnings.
Filters only propagate on handlers, not loggers. | rasa/utils/common.py | set_log_and_warnings_filters | karen-white/rasa | python | def set_log_and_warnings_filters() -> None:
'\n Set log filters on the root logger, and duplicate filters for warnings.\n\n Filters only propagate on handlers, not loggers.\n '
for handler in logging.getLogger().handlers:
handler.addFilter(RepeatedLogFilter())
warnings.filterwarnings('once', category=UserWarning) |
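RepeatedLogFilter is referenced above but not defined in this excerpt. The sketch below is an assumption about what such a filter might look like, not Rasa's actual implementation; it only shows the standard logging.Filter mechanics of dropping consecutive duplicate records::

    import logging

    class RepeatedLogFilter(logging.Filter):
        # Illustrative sketch only: suppress a record if it exactly repeats
        # the previous one (same level and rendered message).

        def __init__(self):
            super().__init__()
            self._last = None

        def filter(self, record: logging.LogRecord) -> bool:
            current = (record.levelno, record.getMessage())
            if current == self._last:
                return False   # drop the repeat
            self._last = current
            return True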