| body (string, 26–98.2k chars) | body_hash (int64, ≈ -9.22e18 to 9.22e18) | docstring (string, 1–16.8k chars) | path (string, 5–230 chars) | name (string, 1–96 chars) | repository_name (string, 7–89 chars) | lang (1 class: python) | body_without_docstring (string, 20–98.2k chars) |
|---|---|---|---|---|---|---|---|
def __init__(self, F, poly):
'Define the Extension Field and the representative polynomial\n '
self.F = F
self.poly = poly
self.siz = len(poly.coef)
self.deg = self.siz | 7,119,987,552,794,296,000 | Define the Extension Field and the representative polynomial | mathTools/field.py | __init__ | ecuvelier/PPAT | python | def __init__(self, F, poly):
'\n '
self.F = F
self.poly = poly
self.siz = len(poly.coef)
self.deg = self.siz |
def iszero(self):
'Return True if it is a zero polynomial (each coefficient is zero)\n This does not return True if the polynomial is the polynomial that generates the extension field\n '
cond = True
for i in self.coef:
pcond = i.iszero()
cond = (pcond * cond)
return cond | 3,512,125,350,708,358,700 | Return True if it is a zero polynomial (each coefficient is zero)
This does not return True if the polynomial is the polynomial that generates the extension field | mathTools/field.py | iszero | ecuvelier/PPAT | python | def iszero(self):
'Return True if it is a zero polynomial (each coefficient is zero)\n This does not return True if the polynomial is the polynomial that generates the extension field\n '
cond = True
for i in self.coef:
pcond = i.iszero()
cond = (pcond * cond)
return cond |
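The loop above builds a logical AND by multiplying booleans (any False factor drives the product to 0). A minimal standalone sketch of the same check, using a plain list of numeric coefficients in place of the field elements assumed by the original class (its `coef` entries each provide their own `iszero()` method):

```python
def is_zero_poly(coef):
    """Return True when every coefficient is zero.

    `coef` is assumed to be a plain list of numbers here; the original
    method instead calls each coefficient's own iszero().
    """
    # all() is the idiomatic spelling of the boolean-product accumulation above.
    return all(c == 0 for c in coef)

assert is_zero_poly([0, 0, 0]) is True
assert is_zero_poly([0, 3, 0]) is False
```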
def truedeg(self):
'Return the position of the first non zero coefficient and the actual degree of the polynomial\n '
if self.iszero():
return (0, 0)
n = 0
while (self.coef[n] == self.F.zero()):
n = (n + 1)
return (n, (self.deg - n)) | -7,869,618,672,398,647,000 | Return the position of the first non zero coefficient and the actual degree of the polynomial | mathTools/field.py | truedeg | ecuvelier/PPAT | python | def truedeg(self):
'\n '
if self.iszero():
return (0, 0)
n = 0
while (self.coef[n] == self.F.zero()):
n = (n + 1)
return (n, (self.deg - n)) |
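A small illustration of the same bookkeeping with plain integer coefficients (the original compares each entry against `self.F.zero()`, the field's zero element, and `deg` below stands in for `self.deg`):

```python
def true_degree(coef, deg):
    """Return (index of first non-zero coefficient, remaining degree)."""
    if all(c == 0 for c in coef):
        return (0, 0)
    n = 0
    while coef[n] == 0:      # skip leading zero coefficients
        n += 1
    return (n, deg - n)

# [0, 0, 5, 1] with deg == 4: first non-zero entry at index 2, degree 4 - 2 = 2
assert true_degree([0, 0, 5, 1], 4) == (2, 2)
```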
def mdot(*args):
'chained matrix product: mdot(A,B,C,..) = A*B*C*...\n No attempt is made to optimize the contraction order.'
r = args[0]
for a in args[1:]:
r = dot(r, a)
return r | 8,142,903,852,845,072,000 | chained matrix product: mdot(A,B,C,..) = A*B*C*...
No attempt is made to optimize the contraction order. | pyscf/tools/Molpro2Pyscf/wmme.py | mdot | JFurness1/pyscf | python | def mdot(*args):
'chained matrix product: mdot(A,B,C,..) = A*B*C*...\n No attempt is made to optimize the contraction order.'
r = args[0]
for a in args[1:]:
r = dot(r, a)
return r |
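A quick usage check of the chained product (NumPy assumed; the `dot` used inside `mdot` is taken to be `numpy.dot`, and the function is repeated here so the snippet runs on its own):

```python
import numpy as np
from numpy import dot

def mdot(*args):            # as defined above
    r = args[0]
    for a in args[1:]:
        r = dot(r, a)
    return r

A, B, C = np.ones((2, 3)), np.ones((3, 4)), np.ones((4, 5))
# The chain is evaluated strictly left to right: (A.B).C, with no reordering.
assert np.allclose(mdot(A, B, C), A @ B @ C)
```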
def _InvokeBfint(Atoms, Bases, BasisLibs, BaseArgs, Outputs, Inputs=None):
'Outputs: an array of tuples (cmdline-arguments,filename-base).\n We will generate arguments for each of them and try to read the\n corresponding files as numpy arrays and return them in order.'
from tempfile import mkdtemp
from shutil import rmtree
from subprocess import check_output, CalledProcessError
BasePath = mkdtemp(prefix='wmme.', dir=_TmpDir)
def Cleanup():
rmtree(BasePath)
pass
BfIntDir = _WmmeDir
if (BfIntDir is None):
BfIntDir = GetModulePath()
BasisLibDir = _BasisLibDir
if (BasisLibDir is None):
BasisLibDir = path.join(BfIntDir, 'bases')
MakeIntegralsExecutable = path.join(BfIntDir, 'wmme')
FileNameXyz = path.join(BasePath, 'ATOMS')
Args = [o for o in BaseArgs]
Args.append('--matrix-format=npy')
for BasisLib in BasisLibs:
Args.append(('--basis-lib=%s' % path.join(BasisLibDir, BasisLib)))
Args.append(('--atoms-au=%s' % FileNameXyz))
iWrittenBasis = 0
for (ParamName, BasisObj) in Bases.items():
if (BasisObj is None):
continue
if isinstance(BasisObj, FBasisSet):
BasisFile = path.join(BasePath, ('BASIS%i' % iWrittenBasis))
iWrittenBasis += 1
with open(BasisFile, 'w') as File:
File.write(BasisObj.FmtCr())
Args.append(("%s='!%s'" % (ParamName, BasisFile)))
else:
assert isinstance(BasisObj, str)
Args.append(('%s=%s' % (ParamName, BasisObj)))
pass
FileNameOutputs = []
for (ArgName, FileNameBase) in Outputs:
FileName = path.join(BasePath, FileNameBase)
FileNameOutputs.append(FileName)
Args.append(("%s='%s'" % (ArgName, FileName)))
XyzLines = ('%i\n\n%s\n' % (len(Atoms), Atoms.MakeXyz('%24.16f')))
try:
with open(FileNameXyz, 'w') as File:
File.write(XyzLines)
if Inputs:
for (ArgName, FileNameBase, Array) in Inputs:
FileName = path.join(BasePath, FileNameBase)
np.save(FileName, Array)
Args.append(("%s='%s'" % (ArgName, FileName)))
Cmd = ('%s %s' % (MakeIntegralsExecutable, ' '.join(Args)))
try:
Output = check_output(Cmd, shell=True)
if (version_info >= (3, 0)):
Output = Output.decode('utf-8')
except CalledProcessError as e:
raise Exception(('Integral calculation failed. Output was:\n%s\nException was: %s' % (e.output, str(e))))
OutputArrays = []
for FileName in FileNameOutputs:
OutputArrays.append(np.load(FileName))
except:
Cleanup()
raise
Cleanup()
return tuple(OutputArrays) | 7,459,019,133,808,410,000 | Outputs: an array of tuples (cmdline-arguments,filename-base).
We will generate arguments for each of them and try to read the
corresponding files as numpy arrays and return them in order. | pyscf/tools/Molpro2Pyscf/wmme.py | _InvokeBfint | JFurness1/pyscf | python | def _InvokeBfint(Atoms, Bases, BasisLibs, BaseArgs, Outputs, Inputs=None):
'Outputs: an array of tuples (cmdline-arguments,filename-base).\n We will generate arguments for each of them and try to read the\n corresponding files as numpy arrays and return them in order.'
from tempfile import mkdtemp
from shutil import rmtree
from subprocess import check_output, CalledProcessError
BasePath = mkdtemp(prefix='wmme.', dir=_TmpDir)
def Cleanup():
rmtree(BasePath)
pass
BfIntDir = _WmmeDir
if (BfIntDir is None):
BfIntDir = GetModulePath()
BasisLibDir = _BasisLibDir
if (BasisLibDir is None):
BasisLibDir = path.join(BfIntDir, 'bases')
MakeIntegralsExecutable = path.join(BfIntDir, 'wmme')
FileNameXyz = path.join(BasePath, 'ATOMS')
Args = [o for o in BaseArgs]
Args.append('--matrix-format=npy')
for BasisLib in BasisLibs:
Args.append(('--basis-lib=%s' % path.join(BasisLibDir, BasisLib)))
Args.append(('--atoms-au=%s' % FileNameXyz))
iWrittenBasis = 0
for (ParamName, BasisObj) in Bases.items():
if (BasisObj is None):
continue
if isinstance(BasisObj, FBasisSet):
BasisFile = path.join(BasePath, ('BASIS%i' % iWrittenBasis))
iWrittenBasis += 1
with open(BasisFile, 'w') as File:
File.write(BasisObj.FmtCr())
Args.append(("%s='!%s'" % (ParamName, BasisFile)))
else:
assert isinstance(BasisObj, str)
Args.append(('%s=%s' % (ParamName, BasisObj)))
pass
FileNameOutputs = []
for (ArgName, FileNameBase) in Outputs:
FileName = path.join(BasePath, FileNameBase)
FileNameOutputs.append(FileName)
Args.append(("%s='%s'" % (ArgName, FileName)))
XyzLines = ('%i\n\n%s\n' % (len(Atoms), Atoms.MakeXyz('%24.16f')))
try:
with open(FileNameXyz, 'w') as File:
File.write(XyzLines)
if Inputs:
for (ArgName, FileNameBase, Array) in Inputs:
FileName = path.join(BasePath, FileNameBase)
np.save(FileName, Array)
Args.append(("%s='%s'" % (ArgName, FileName)))
Cmd = ('%s %s' % (MakeIntegralsExecutable, ' '.join(Args)))
try:
Output = check_output(Cmd, shell=True)
if (version_info >= (3, 0)):
Output = Output.decode('utf-8')
except CalledProcessError as e:
raise Exception(('Integral calculation failed. Output was:\n%s\nException was: %s' % (e.output, str(e))))
OutputArrays = []
for FileName in FileNameOutputs:
OutputArrays.append(np.load(FileName))
except:
Cleanup()
raise
Cleanup()
return tuple(OutputArrays) |
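`_InvokeBfint` follows a common round-trip pattern: write inputs into a scratch directory, shell out to an external executable, load its `.npy` outputs, and clean the directory up even when something fails. A minimal, self-contained sketch of that pattern; the `my_tool` executable and its `--input`/`--output` flags are hypothetical placeholders, not wmme's real interface:

```python
import numpy as np
from os import path
from shutil import rmtree
from subprocess import check_output, CalledProcessError
from tempfile import mkdtemp

def run_external_tool(input_array, executable='my_tool'):
    """Save an input .npy, run a (hypothetical) tool, return its output array."""
    work_dir = mkdtemp(prefix='demo.')
    try:
        in_file = path.join(work_dir, 'INPUT.npy')
        out_file = path.join(work_dir, 'OUTPUT.npy')
        np.save(in_file, input_array)
        cmd = "%s --input='%s' --output='%s'" % (executable, in_file, out_file)
        try:
            check_output(cmd, shell=True)
        except CalledProcessError as e:
            raise Exception('external tool failed: %s' % e)
        return np.load(out_file)
    finally:
        rmtree(work_dir)    # always remove the scratch directory
```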
def __init__(self, Positions, Elements, Orientations=None, Name=None):
'Positions: 3 x nAtom matrix. Given in atomic units (ABohr).\n Elements: element name (e.g., H) for each of the positions.\n Orientations: If given, a [3,3,N] array encoding the standard\n orientation of the given atoms (for replicating potentials!). For\n each atom there is a orthogonal 3x3 matrix denoting the ex,ey,ez\n directions.'
self.Pos = Positions
assert ((self.Pos.shape[0] == 3) and (self.Pos.shape[1] == len(Elements)))
self.Elements = Elements
self.Orientations = Orientations
self.Name = Name | -7,393,387,328,684,770,000 | Positions: 3 x nAtom matrix. Given in atomic units (ABohr).
Elements: element name (e.g., H) for each of the positions.
Orientations: If given, a [3,3,N] array encoding the standard
orientation of the given atoms (for replicating potentials!). For
each atom there is a orthogonal 3x3 matrix denoting the ex,ey,ez
directions. | pyscf/tools/Molpro2Pyscf/wmme.py | __init__ | JFurness1/pyscf | python | def __init__(self, Positions, Elements, Orientations=None, Name=None):
'Positions: 3 x nAtom matrix. Given in atomic units (ABohr).\n Elements: element name (e.g., H) for each of the positions.\n Orientations: If given, a [3,3,N] array encoding the standard\n orientation of the given atoms (for replicating potentials!). For\n each atom there is a orthogonal 3x3 matrix denoting the ex,ey,ez\n directions.'
self.Pos = Positions
assert ((self.Pos.shape[0] == 3) and (self.Pos.shape[1] == len(Elements)))
self.Elements = Elements
self.Orientations = Orientations
self.Name = Name |
def nElecNeutral(self):
'return number of electrons present in the total system if neutral.'
return sum([ElementNumbers[o] for o in self.Elements]) | 4,413,039,619,599,940,000 | return number of electrons present in the total system if neutral. | pyscf/tools/Molpro2Pyscf/wmme.py | nElecNeutral | JFurness1/pyscf | python | def nElecNeutral(self):
return sum([ElementNumbers[o] for o in self.Elements]) |
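The neutral electron count is just a sum of atomic numbers over the element list. A toy check against a hand-rolled subset of the module-level `ElementNumbers` table the method assumes:

```python
# Minimal stand-in for the ElementNumbers mapping used above.
ElementNumbers = {'H': 1, 'C': 6, 'N': 7, 'O': 8}

def n_elec_neutral(elements):
    """Electrons in the neutral system: sum of atomic numbers."""
    return sum(ElementNumbers[e] for e in elements)

assert n_elec_neutral(['O', 'H', 'H']) == 10    # water
```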
def MakeBaseIntegrals(self, Smh=True, MakeS=False):
'Invoke bfint to calculate CoreEnergy (scalar), CoreH (nOrb x nOrb),\n Int2e_Frs (nFit x nOrb x nOrb), and overlap matrix (nOrb x nOrb)'
Args = []
if Smh:
Args.append('--orb-trafo=Smh')
Outputs = []
Outputs.append(('--save-coreh', 'INT1E'))
Outputs.append(('--save-fint2e', 'INT2E'))
Outputs.append(('--save-overlap', 'OVERLAP'))
(CoreH, Int2e, Overlap) = self._InvokeBfint(Args, Outputs)
nOrb = CoreH.shape[0]
Int2e = Int2e.reshape((Int2e.shape[0], nOrb, nOrb))
CoreEnergy = self.Atoms.fCoreRepulsion()
if MakeS:
return (CoreEnergy, CoreH, Int2e, Overlap)
else:
return (CoreEnergy, CoreH, Int2e) | 1,153,435,770,864,813,000 | Invoke bfint to calculate CoreEnergy (scalar), CoreH (nOrb x nOrb),
Int2e_Frs (nFit x nOrb x nOrb), and overlap matrix (nOrb x nOrb) | pyscf/tools/Molpro2Pyscf/wmme.py | MakeBaseIntegrals | JFurness1/pyscf | python | def MakeBaseIntegrals(self, Smh=True, MakeS=False):
'Invoke bfint to calculate CoreEnergy (scalar), CoreH (nOrb x nOrb),\n Int2e_Frs (nFit x nOrb x nOrb), and overlap matrix (nOrb x nOrb)'
Args = []
if Smh:
Args.append('--orb-trafo=Smh')
Outputs = []
Outputs.append(('--save-coreh', 'INT1E'))
Outputs.append(('--save-fint2e', 'INT2E'))
Outputs.append(('--save-overlap', 'OVERLAP'))
(CoreH, Int2e, Overlap) = self._InvokeBfint(Args, Outputs)
nOrb = CoreH.shape[0]
Int2e = Int2e.reshape((Int2e.shape[0], nOrb, nOrb))
CoreEnergy = self.Atoms.fCoreRepulsion()
if MakeS:
return (CoreEnergy, CoreH, Int2e, Overlap)
else:
return (CoreEnergy, CoreH, Int2e) |
def MakeOverlaps2(self, OrbBasis2):
'calculate overlap between current basis and a second basis, as\n described in OrbBasis2. Returns <1|2> and <2|2> matrices.'
Args = []
MoreBases = {'--basis-orb-2': OrbBasis2}
Outputs = []
Outputs.append(('--save-overlap-2', 'OVERLAP_2'))
Outputs.append(('--save-overlap-12', 'OVERLAP_12'))
(Overlap2, Overlap12) = self._InvokeBfint(Args, Outputs, MoreBases=MoreBases)
return (Overlap2, Overlap12) | -8,342,458,476,742,344,000 | calculate overlap between current basis and a second basis, as
described in OrbBasis2. Returns <1|2> and <2|2> matrices. | pyscf/tools/Molpro2Pyscf/wmme.py | MakeOverlaps2 | JFurness1/pyscf | python | def MakeOverlaps2(self, OrbBasis2):
'calculate overlap between current basis and a second basis, as\n described in OrbBasis2. Returns <1|2> and <2|2> matrices.'
Args = []
MoreBases = {'--basis-orb-2': OrbBasis2}
Outputs = []
Outputs.append(('--save-overlap-2', 'OVERLAP_2'))
Outputs.append(('--save-overlap-12', 'OVERLAP_12'))
(Overlap2, Overlap12) = self._InvokeBfint(Args, Outputs, MoreBases=MoreBases)
return (Overlap2, Overlap12) |
def MakeOverlap(self, OrbBasis2=None):
'calculate overlap within main orbital basis, and, optionally, between main\n orbital basis and a second basis, as described in OrbBasis2.\n Returns <1|1>, <1|2>, and <2|2> matrices.'
Args = []
Outputs = []
Outputs.append(('--save-overlap', 'OVERLAP_1'))
if (OrbBasis2 is not None):
MoreBases = {'--basis-orb-2': OrbBasis2}
Outputs.append(('--save-overlap-12', 'OVERLAP_12'))
Outputs.append(('--save-overlap-2', 'OVERLAP_2'))
return self._InvokeBfint(Args, Outputs, MoreBases=MoreBases)
else:
MoreBases = None
(Overlap,) = self._InvokeBfint(Args, Outputs, MoreBases=MoreBases)
return Overlap | -2,965,824,893,769,018,400 | calculate overlap within main orbital basis, and, optionally, between main
orbital basis and a second basis, as described in OrbBasis2.
Returns <1|1>, <1|2>, and <2|2> matrices. | pyscf/tools/Molpro2Pyscf/wmme.py | MakeOverlap | JFurness1/pyscf | python | def MakeOverlap(self, OrbBasis2=None):
'calculate overlap within main orbital basis, and, optionally, between main\n orbital basis and a second basis, as described in OrbBasis2.\n Returns <1|1>, <1|2>, and <2|2> matrices.'
Args = []
Outputs = []
Outputs.append(('--save-overlap', 'OVERLAP_1'))
if (OrbBasis2 is not None):
MoreBases = {'--basis-orb-2': OrbBasis2}
Outputs.append(('--save-overlap-12', 'OVERLAP_12'))
Outputs.append(('--save-overlap-2', 'OVERLAP_2'))
return self._InvokeBfint(Args, Outputs, MoreBases=MoreBases)
else:
MoreBases = None
(Overlap,) = self._InvokeBfint(Args, Outputs, MoreBases=MoreBases)
return Overlap |
def MakeNuclearAttractionIntegrals(self, Smh=True):
'calculate nuclear attraction integrals in main basis, for each individual atomic core.\n Returns nAo x nAo x nAtoms array.'
Args = []
if Smh:
Args.append('--orb-trafo=Smh')
Outputs = []
Outputs.append(('--save-vnucN', 'VNUC_N'))
VNucN = self._InvokeBfint(Args, Outputs)[0]
nOrb = int(((VNucN.shape[0] ** 0.5) + 0.5))
assert ((nOrb ** 2) == VNucN.shape[0])
assert (VNucN.shape[1] == len(self.Atoms))
return VNucN.reshape(nOrb, nOrb, VNucN.shape[1]) | -7,974,952,442,303,949,000 | calculate nuclear attraction integrals in main basis, for each individual atomic core.
Returns nAo x nAo x nAtoms array. | pyscf/tools/Molpro2Pyscf/wmme.py | MakeNuclearAttractionIntegrals | JFurness1/pyscf | python | def MakeNuclearAttractionIntegrals(self, Smh=True):
'calculate nuclear attraction integrals in main basis, for each individual atomic core.\n Returns nAo x nAo x nAtoms array.'
Args = []
if Smh:
Args.append('--orb-trafo=Smh')
Outputs = []
Outputs.append(('--save-vnucN', 'VNUC_N'))
VNucN = self._InvokeBfint(Args, Outputs)[0]
nOrb = int(((VNucN.shape[0] ** 0.5) + 0.5))
assert ((nOrb ** 2) == VNucN.shape[0])
assert (VNucN.shape[1] == len(self.Atoms))
return VNucN.reshape(nOrb, nOrb, VNucN.shape[1]) |
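Several of these accessors get a flattened nOrb**2 leading dimension back from the executable and recover nOrb with `int(x ** 0.5 + 0.5)` (round to nearest, to absorb floating-point noise from the square root) before reshaping. A small check of that idiom, NumPy assumed:

```python
import numpy as np

flat = np.zeros((49, 3))                   # pretend 49 = nOrb**2 rows, 3 atoms
nOrb = int(flat.shape[0] ** 0.5 + 0.5)     # 7, even if sqrt returns 6.9999...
assert nOrb ** 2 == flat.shape[0]

cube = flat.reshape(nOrb, nOrb, flat.shape[1])
assert cube.shape == (7, 7, 3)
```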
def MakeNuclearSqDistanceIntegrals(self, Smh=True):
'calculate <mu|(r-rA)^2|nu> integrals in main basis, for each individual atomic core.\n Returns nAo x nAo x nAtoms array.'
Args = []
if Smh:
Args.append('--orb-trafo=Smh')
Outputs = []
Outputs.append(('--save-rsqN', 'RSQ_N'))
RsqN = self._InvokeBfint(Args, Outputs)[0]
nOrb = int(((RsqN.shape[0] ** 0.5) + 0.5))
assert ((nOrb ** 2) == RsqN.shape[0])
assert (RsqN.shape[1] == len(self.Atoms))
return RsqN.reshape(nOrb, nOrb, RsqN.shape[1]) | 441,603,403,193,922,800 | calculate <mu|(r-rA)^2|nu> integrals in main basis, for each individual atomic core.
Returns nAo x nAo x nAtoms array. | pyscf/tools/Molpro2Pyscf/wmme.py | MakeNuclearSqDistanceIntegrals | JFurness1/pyscf | python | def MakeNuclearSqDistanceIntegrals(self, Smh=True):
'calculate <mu|(r-rA)^2|nu> integrals in main basis, for each individual atomic core.\n Returns nAo x nAo x nAtoms array.'
Args = []
if Smh:
Args.append('--orb-trafo=Smh')
Outputs = []
Outputs.append(('--save-rsqN', 'RSQ_N'))
RsqN = self._InvokeBfint(Args, Outputs)[0]
nOrb = int(((RsqN.shape[0] ** 0.5) + 0.5))
assert ((nOrb ** 2) == RsqN.shape[0])
assert (RsqN.shape[1] == len(self.Atoms))
return RsqN.reshape(nOrb, nOrb, RsqN.shape[1]) |
def MakeKineticIntegrals(self, Smh=True):
'calculate <mu|-1/2 Laplace|nu> integrals in main basis, for each individual atomic core.\n Returns nAo x nAo x nAtoms array.'
Args = []
if Smh:
Args.append('--orb-trafo=Smh')
Outputs = []
Outputs.append(('--save-kinetic', 'EKIN'))
Op = self._InvokeBfint(Args, Outputs)[0]
return Op | -8,121,335,166,548,213,000 | calculate <mu|-1/2 Laplace|nu> integrals in main basis, for each individual atomic core.
Returns nAo x nAo x nAtoms array. | pyscf/tools/Molpro2Pyscf/wmme.py | MakeKineticIntegrals | JFurness1/pyscf | python | def MakeKineticIntegrals(self, Smh=True):
'calculate <mu|-1/2 Laplace|nu> integrals in main basis, for each individual atomic core.\n Returns nAo x nAo x nAtoms array.'
Args = []
if Smh:
Args.append('--orb-trafo=Smh')
Outputs = []
Outputs.append(('--save-kinetic', 'EKIN'))
Op = self._InvokeBfint(Args, Outputs)[0]
return Op |
def MakeDipoleIntegrals(self, Smh=True):
'calculate dipole operator matrices <\\mu|w|\\nu> (w=x,y,z) in\n main basis, for each direction. Returns nAo x nAo x 3 array.'
Args = []
if Smh:
Args.append('--orb-trafo=Smh')
Outputs = []
Outputs.append(('--save-dipole', 'DIPN'))
DipN = self._InvokeBfint(Args, Outputs)[0]
nOrb = int(((DipN.shape[0] ** 0.5) + 0.5))
assert ((nOrb ** 2) == DipN.shape[0])
assert (DipN.shape[1] == 3)
return DipN.reshape(nOrb, nOrb, 3) | 3,350,475,485,117,086,700 | calculate dipole operator matrices <\mu|w|\nu> (w=x,y,z) in
main basis, for each direction. Returns nAo x nAo x 3 array. | pyscf/tools/Molpro2Pyscf/wmme.py | MakeDipoleIntegrals | JFurness1/pyscf | python | def MakeDipoleIntegrals(self, Smh=True):
'calculate dipole operator matrices <\\mu|w|\\nu> (w=x,y,z) in\n main basis, for each direction. Returns nAo x nAo x 3 array.'
Args = []
if Smh:
Args.append('--orb-trafo=Smh')
Outputs = []
Outputs.append(('--save-dipole', 'DIPN'))
DipN = self._InvokeBfint(Args, Outputs)[0]
nOrb = int(((DipN.shape[0] ** 0.5) + 0.5))
assert ((nOrb ** 2) == DipN.shape[0])
assert (DipN.shape[1] == 3)
return DipN.reshape(nOrb, nOrb, 3) |
def MakeOrbitalsOnGrid(self, Orbitals, Grid, DerivativeOrder=0):
'calculate values of molecular orbitals on a grid of 3d points in space.\n Input:\n - Orbitals: nAo x nOrb matrix, where nAo must be compatible with\n self.OrbBasis. The AO dimension must be contravariant AO (i.e., not SMH).\n - Grid: 3 x nGrid array giving the coordinates of the grid points.\n - DerivativeOrder: 0: only orbital values,\n 1: orbital values and 1st derivatives,\n 2: orbital values and up to 2nd derivatives.\n Returns:\n - nGrid x nDerivComp x nOrb array. If DerivativeOrder is 0, the\n DerivComp dimension is omitted.\n '
Args = [('--eval-orbitals-dx=%s' % DerivativeOrder)]
Inputs = ([('--eval-orbitals', 'ORBITALS.npy', Orbitals)] + [('--grid-coords', 'GRID.npy', Grid)])
Outputs = [('--save-grid-values', 'ORBS_ON_GRID')]
(ValuesOnGrid,) = self._InvokeBfint(Args, Outputs, Inputs)
nComp = [1, 4, 10][DerivativeOrder]
if (nComp != 1):
ValuesOnGrid = ValuesOnGrid.reshape((Grid.shape[1], nComp, Orbitals.shape[1]))
return ValuesOnGrid | -8,708,707,247,343,573,000 | calculate values of molecular orbitals on a grid of 3d points in space.
Input:
- Orbitals: nAo x nOrb matrix, where nAo must be compatible with
self.OrbBasis. The AO dimension must be contravariant AO (i.e., not SMH).
- Grid: 3 x nGrid array giving the coordinates of the grid points.
- DerivativeOrder: 0: only orbital values,
1: orbital values and 1st derivatives,
2: orbital values and up to 2nd derivatives.
Returns:
- nGrid x nDerivComp x nOrb array. If DerivativeOrder is 0, the
DerivComp dimension is omitted. | pyscf/tools/Molpro2Pyscf/wmme.py | MakeOrbitalsOnGrid | JFurness1/pyscf | python | def MakeOrbitalsOnGrid(self, Orbitals, Grid, DerivativeOrder=0):
'calculate values of molecular orbitals on a grid of 3d points in space.\n Input:\n - Orbitals: nAo x nOrb matrix, where nAo must be compatible with\n self.OrbBasis. The AO dimension must be contravariant AO (i.e., not SMH).\n - Grid: 3 x nGrid array giving the coordinates of the grid points.\n - DerivativeOrder: 0: only orbital values,\n 1: orbital values and 1st derivatives,\n 2: orbital values and up to 2nd derivatives.\n Returns:\n - nGrid x nDerivComp x nOrb array. If DerivativeOrder is 0, the\n DerivComp dimension is omitted.\n '
Args = [('--eval-orbitals-dx=%s' % DerivativeOrder)]
Inputs = ([('--eval-orbitals', 'ORBITALS.npy', Orbitals)] + [('--grid-coords', 'GRID.npy', Grid)])
Outputs = [('--save-grid-values', 'ORBS_ON_GRID')]
(ValuesOnGrid,) = self._InvokeBfint(Args, Outputs, Inputs)
nComp = [1, 4, 10][DerivativeOrder]
if (nComp != 1):
ValuesOnGrid = ValuesOnGrid.reshape((Grid.shape[1], nComp, Orbitals.shape[1]))
return ValuesOnGrid |
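The `Grid` argument is a 3 x nGrid array of Cartesian points. A sketch of building such a grid and of the result shapes the docstring promises for each derivative order (shape bookkeeping only; no integrals are evaluated here):

```python
import numpy as np

# 3 x nGrid: a short line of points along z (values are illustrative).
z = np.linspace(-2.0, 2.0, 5)
grid = np.vstack([np.zeros_like(z), np.zeros_like(z), z])
assert grid.shape == (3, 5)

# Expected shapes for, say, nOrb = 4 orbitals, per the docstring above:
n_grid, n_orb = grid.shape[1], 4
expected = {0: (n_grid, n_orb),       # values only; DerivComp axis omitted
            1: (n_grid, 4, n_orb),    # value plus x, y, z first derivatives
            2: (n_grid, 10, n_orb)}   # value plus first and second derivatives
print(expected)
```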
def MakeRaw2eIntegrals(self, Smh=True, Kernel2e='coulomb'):
'compute Int2e_Frs (nFit x nOrb x nOrb) and fitting metric Int2e_FG (nFit x nFit),\n where the fitting metric is *not* absorbed into the 2e integrals.'
Args = []
if Smh:
Args.append('--orb-trafo=Smh')
Args.append(("--kernel2e='%s'" % Kernel2e))
Args.append('--solve-fitting-eq=false')
Outputs = []
Outputs.append(('--save-fint2e', 'INT2E_3IX'))
Outputs.append(('--save-fitting-metric', 'INT2E_METRIC'))
(Int2e_Frs, Int2e_FG) = self._InvokeBfint(Args, Outputs)
nOrb = int(((Int2e_Frs.shape[1] ** 0.5) + 0.5))
assert ((nOrb ** 2) == Int2e_Frs.shape[1])
Int2e_Frs = Int2e_Frs.reshape((Int2e_Frs.shape[0], nOrb, nOrb))
assert (Int2e_Frs.shape[0] == Int2e_FG.shape[0])
assert (Int2e_FG.shape[0] == Int2e_FG.shape[1])
return (Int2e_FG, Int2e_Frs) | 2,896,667,907,041,322,000 | compute Int2e_Frs (nFit x nOrb x nOrb) and fitting metric Int2e_FG (nFit x nFit),
where the fitting metric is *not* absorbed into the 2e integrals. | pyscf/tools/Molpro2Pyscf/wmme.py | MakeRaw2eIntegrals | JFurness1/pyscf | python | def MakeRaw2eIntegrals(self, Smh=True, Kernel2e='coulomb'):
'compute Int2e_Frs (nFit x nOrb x nOrb) and fitting metric Int2e_FG (nFit x nFit),\n where the fitting metric is *not* absorbed into the 2e integrals.'
Args = []
if Smh:
Args.append('--orb-trafo=Smh')
Args.append(("--kernel2e='%s'" % Kernel2e))
Args.append('--solve-fitting-eq=false')
Outputs = []
Outputs.append(('--save-fint2e', 'INT2E_3IX'))
Outputs.append(('--save-fitting-metric', 'INT2E_METRIC'))
(Int2e_Frs, Int2e_FG) = self._InvokeBfint(Args, Outputs)
nOrb = int(((Int2e_Frs.shape[1] ** 0.5) + 0.5))
assert ((nOrb ** 2) == Int2e_Frs.shape[1])
Int2e_Frs = Int2e_Frs.reshape((Int2e_Frs.shape[0], nOrb, nOrb))
assert (Int2e_Frs.shape[0] == Int2e_FG.shape[0])
assert (Int2e_FG.shape[0] == Int2e_FG.shape[1])
return (Int2e_FG, Int2e_Frs) |
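With the metric returned unsolved, the density-fitted four-index integrals are typically assembled by solving the metric against one three-index factor and contracting, (rs|tu) ~ sum_FG (F|rs) [J^-1]_FG (G|tu). A sketch of that assembly in NumPy; random arrays stand in for the real bfint outputs, and a symmetric positive-definite metric (as for the Coulomb kernel) is assumed:

```python
import numpy as np

nFit, nOrb = 6, 3
rng = np.random.default_rng(0)

# Stand-ins for the outputs above: J_FG (fitting metric), T_Frs (3-index ints).
A = rng.standard_normal((nFit, nFit))
J_FG = A @ A.T + nFit * np.eye(nFit)       # symmetric positive definite
T_Frs = rng.standard_normal((nFit, nOrb, nOrb))

# C_Frs = sum_G [J^-1]_FG T_Grs, then (rs|tu) = sum_F C_Frs T_Ftu
C = np.linalg.solve(J_FG, T_Frs.reshape(nFit, -1)).reshape(nFit, nOrb, nOrb)
eri = np.einsum('Frs,Ftu->rstu', C, T_Frs)
assert eri.shape == (nOrb, nOrb, nOrb, nOrb)
```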
def service_cidr():
" Return the charm's service-cidr config "
db = unitdata.kv()
frozen_cidr = db.get('kubernetes-master.service-cidr')
return (frozen_cidr or hookenv.config('service-cidr')) | 2,082,332,788,383,550,500 | Return the charm's service-cidr config | vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py | service_cidr | BaiHuoYu/nbp | python | def service_cidr():
" "
db = unitdata.kv()
frozen_cidr = db.get('kubernetes-master.service-cidr')
return (frozen_cidr or hookenv.config('service-cidr')) |
def freeze_service_cidr():
' Freeze the service CIDR. Once the apiserver has started, we can no\n longer safely change this value. '
db = unitdata.kv()
db.set('kubernetes-master.service-cidr', service_cidr()) | -8,319,294,074,560,905,000 | Freeze the service CIDR. Once the apiserver has started, we can no
longer safely change this value. | vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py | freeze_service_cidr | BaiHuoYu/nbp | python | def freeze_service_cidr():
' Freeze the service CIDR. Once the apiserver has started, we can no\n longer safely change this value. '
db = unitdata.kv()
db.set('kubernetes-master.service-cidr', service_cidr()) |
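Together, `service_cidr` and `freeze_service_cidr` implement a freeze-on-first-use pattern: once the value is written to the charm's key/value store, later config changes no longer take effect. A standalone sketch of the same pattern with plain dicts standing in for `unitdata.kv()` and `hookenv.config()`:

```python
# Plain-dict stand-ins for unitdata.kv() and hookenv.config().
kv = {}
config = {'service-cidr': '10.152.183.0/24'}

def service_cidr():
    # A frozen value, if present, takes precedence over the live config.
    return kv.get('kubernetes-master.service-cidr') or config['service-cidr']

def freeze_service_cidr():
    kv['kubernetes-master.service-cidr'] = service_cidr()

freeze_service_cidr()
config['service-cidr'] = '10.0.0.0/16'       # operator changes the config later
assert service_cidr() == '10.152.183.0/24'   # the frozen value still wins
```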
@hook('upgrade-charm')
def reset_states_for_delivery():
'An upgrade charm event was triggered by Juju, react to that here.'
migrate_from_pre_snaps()
install_snaps()
set_state('reconfigure.authentication.setup')
remove_state('authentication.setup') | -2,834,097,775,026,214,400 | An upgrade charm event was triggered by Juju, react to that here. | vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py | reset_states_for_delivery | BaiHuoYu/nbp | python | @hook('upgrade-charm')
def reset_states_for_delivery():
migrate_from_pre_snaps()
install_snaps()
set_state('reconfigure.authentication.setup')
remove_state('authentication.setup') |
@when('config.changed.client_password', 'leadership.is_leader')
def password_changed():
'Handle password change via the charms config.'
password = hookenv.config('client_password')
if ((password == '') and is_state('client.password.initialised')):
return
elif (password == ''):
password = token_generator()
setup_basic_auth(password, 'admin', 'admin')
set_state('reconfigure.authentication.setup')
remove_state('authentication.setup')
set_state('client.password.initialised') | -6,696,244,841,314,084,000 | Handle password change via the charms config. | vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py | password_changed | BaiHuoYu/nbp | python | @when('config.changed.client_password', 'leadership.is_leader')
def password_changed():
password = hookenv.config('client_password')
if ((password == '') and is_state('client.password.initialised')):
return
elif (password == ''):
password = token_generator()
setup_basic_auth(password, 'admin', 'admin')
set_state('reconfigure.authentication.setup')
remove_state('authentication.setup')
set_state('client.password.initialised') |
@when('cni.connected')
@when_not('cni.configured')
def configure_cni(cni):
" Set master configuration on the CNI relation. This lets the CNI\n subordinate know that we're the master so it can respond accordingly. "
cni.set_config(is_master=True, kubeconfig_path='') | 8,362,492,290,030,831,000 | Set master configuration on the CNI relation. This lets the CNI
subordinate know that we're the master so it can respond accordingly. | vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py | configure_cni | BaiHuoYu/nbp | python | @when('cni.connected')
@when_not('cni.configured')
def configure_cni(cni):
" Set master configuration on the CNI relation. This lets the CNI\n subordinate know that we're the master so it can respond accordingly. "
cni.set_config(is_master=True, kubeconfig_path='') |
@when('leadership.is_leader')
@when_not('authentication.setup')
def setup_leader_authentication():
'Setup basic authentication and token access for the cluster.'
api_opts = FlagManager('kube-apiserver')
controller_opts = FlagManager('kube-controller-manager')
service_key = '/root/cdk/serviceaccount.key'
basic_auth = '/root/cdk/basic_auth.csv'
known_tokens = '/root/cdk/known_tokens.csv'
api_opts.add('basic-auth-file', basic_auth)
api_opts.add('token-auth-file', known_tokens)
hookenv.status_set('maintenance', 'Rendering authentication templates.')
keys = [service_key, basic_auth, known_tokens]
if ((not get_keys_from_leader(keys)) or is_state('reconfigure.authentication.setup')):
last_pass = get_password('basic_auth.csv', 'admin')
setup_basic_auth(last_pass, 'admin', 'admin')
if (not os.path.isfile(known_tokens)):
setup_tokens(None, 'admin', 'admin')
setup_tokens(None, 'kubelet', 'kubelet')
setup_tokens(None, 'kube_proxy', 'kube_proxy')
os.makedirs('/root/cdk', exist_ok=True)
if (not os.path.isfile(service_key)):
cmd = ['openssl', 'genrsa', '-out', service_key, '2048']
check_call(cmd)
remove_state('reconfigure.authentication.setup')
api_opts.add('service-account-key-file', service_key)
controller_opts.add('service-account-private-key-file', service_key)
leader_data = {}
for f in [known_tokens, basic_auth, service_key]:
with open(f, 'r') as fp:
leader_data[f] = fp.read()
charms.leadership.leader_set(leader_data)
remove_state('kubernetes-master.components.started')
set_state('authentication.setup') | 7,515,041,697,489,415,000 | Setup basic authentication and token access for the cluster. | vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py | setup_leader_authentication | BaiHuoYu/nbp | python | @when('leadership.is_leader')
@when_not('authentication.setup')
def setup_leader_authentication():
api_opts = FlagManager('kube-apiserver')
controller_opts = FlagManager('kube-controller-manager')
service_key = '/root/cdk/serviceaccount.key'
basic_auth = '/root/cdk/basic_auth.csv'
known_tokens = '/root/cdk/known_tokens.csv'
api_opts.add('basic-auth-file', basic_auth)
api_opts.add('token-auth-file', known_tokens)
hookenv.status_set('maintenance', 'Rendering authentication templates.')
keys = [service_key, basic_auth, known_tokens]
if ((not get_keys_from_leader(keys)) or is_state('reconfigure.authentication.setup')):
last_pass = get_password('basic_auth.csv', 'admin')
setup_basic_auth(last_pass, 'admin', 'admin')
if (not os.path.isfile(known_tokens)):
setup_tokens(None, 'admin', 'admin')
setup_tokens(None, 'kubelet', 'kubelet')
setup_tokens(None, 'kube_proxy', 'kube_proxy')
os.makedirs('/root/cdk', exist_ok=True)
if (not os.path.isfile(service_key)):
cmd = ['openssl', 'genrsa', '-out', service_key, '2048']
check_call(cmd)
remove_state('reconfigure.authentication.setup')
api_opts.add('service-account-key-file', service_key)
controller_opts.add('service-account-private-key-file', service_key)
leader_data = {}
for f in [known_tokens, basic_auth, service_key]:
with open(f, 'r') as fp:
leader_data[f] = fp.read()
charms.leadership.leader_set(leader_data)
remove_state('kubernetes-master.components.started')
set_state('authentication.setup') |
def get_keys_from_leader(keys, overwrite_local=False):
'\n Gets the broadcasted keys from the leader and stores them in\n the corresponding files.\n\n Args:\n keys: list of keys. Keys are actually files on the FS.\n\n Returns: True if all key were fetched, False if not.\n\n '
os.makedirs('/root/cdk', exist_ok=True)
for k in keys:
if ((not os.path.exists(k)) or overwrite_local):
contents = charms.leadership.leader_get(k)
if (contents is None):
msg = 'Waiting on leaders crypto keys.'
hookenv.status_set('waiting', msg)
hookenv.log('Missing content for file {}'.format(k))
return False
with open(k, 'w+') as fp:
fp.write(contents)
return True | 5,011,326,847,538,366,000 | Gets the broadcasted keys from the leader and stores them in
the corresponding files.
Args:
keys: list of keys. Keys are actually files on the FS.
Returns: True if all key were fetched, False if not. | vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py | get_keys_from_leader | BaiHuoYu/nbp | python | def get_keys_from_leader(keys, overwrite_local=False):
'\n Gets the broadcasted keys from the leader and stores them in\n the corresponding files.\n\n Args:\n keys: list of keys. Keys are actually files on the FS.\n\n Returns: True if all key were fetched, False if not.\n\n '
os.makedirs('/root/cdk', exist_ok=True)
for k in keys:
if ((not os.path.exists(k)) or overwrite_local):
contents = charms.leadership.leader_get(k)
if (contents is None):
msg = 'Waiting on leaders crypto keys.'
hookenv.status_set('waiting', msg)
hookenv.log('Missing content for file {}'.format(k))
return False
with open(k, 'w+') as fp:
fp.write(contents)
return True |
@when('kubernetes-master.snaps.installed')
def set_app_version():
' Declare the application version to juju '
version = check_output(['kube-apiserver', '--version'])
hookenv.application_version_set(version.split(b' v')[(- 1)].rstrip()) | -7,837,022,965,875,794,000 | Declare the application version to juju | vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py | set_app_version | BaiHuoYu/nbp | python | @when('kubernetes-master.snaps.installed')
def set_app_version():
' '
version = check_output(['kube-apiserver', '--version'])
hookenv.application_version_set(version.split(b' v')[(- 1)].rstrip()) |
@when('cdk-addons.configured', 'kube-api-endpoint.available', 'kube-control.connected')
def idle_status(kube_api, kube_control):
' Signal at the end of the run that we are running. '
if (not all_kube_system_pods_running()):
hookenv.status_set('waiting', 'Waiting for kube-system pods to start')
elif (hookenv.config('service-cidr') != service_cidr()):
msg = ('WARN: cannot change service-cidr, still using ' + service_cidr())
hookenv.status_set('active', msg)
else:
failing_services = master_services_down()
if (len(failing_services) == 0):
hookenv.status_set('active', 'Kubernetes master running.')
else:
msg = 'Stopped services: {}'.format(','.join(failing_services))
hookenv.status_set('blocked', msg) | -87,910,655,510,297,980 | Signal at the end of the run that we are running. | vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py | idle_status | BaiHuoYu/nbp | python | @when('cdk-addons.configured', 'kube-api-endpoint.available', 'kube-control.connected')
def idle_status(kube_api, kube_control):
' '
if (not all_kube_system_pods_running()):
hookenv.status_set('waiting', 'Waiting for kube-system pods to start')
elif (hookenv.config('service-cidr') != service_cidr()):
msg = ('WARN: cannot change service-cidr, still using ' + service_cidr())
hookenv.status_set('active', msg)
else:
failing_services = master_services_down()
if (len(failing_services) == 0):
hookenv.status_set('active', 'Kubernetes master running.')
else:
msg = 'Stopped services: {}'.format(','.join(failing_services))
hookenv.status_set('blocked', msg) |
def master_services_down():
'Ensure master services are up and running.\n\n Return: list of failing services'
services = ['kube-apiserver', 'kube-controller-manager', 'kube-scheduler']
failing_services = []
for service in services:
daemon = 'snap.{}.daemon'.format(service)
if (not host.service_running(daemon)):
failing_services.append(service)
return failing_services | 5,637,071,088,973,993,000 | Ensure master services are up and running.
Return: list of failing services | vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py | master_services_down | BaiHuoYu/nbp | python | def master_services_down():
'Ensure master services are up and running.\n\n Return: list of failing services'
services = ['kube-apiserver', 'kube-controller-manager', 'kube-scheduler']
failing_services = []
for service in services:
daemon = 'snap.{}.daemon'.format(service)
if (not host.service_running(daemon)):
failing_services.append(service)
return failing_services |
@when('etcd.available', 'tls_client.server.certificate.saved', 'authentication.setup')
@when_not('kubernetes-master.components.started')
def start_master(etcd):
'Run the Kubernetes master components.'
hookenv.status_set('maintenance', 'Configuring the Kubernetes master services.')
freeze_service_cidr()
if (not etcd.get_connection_string()):
return
handle_etcd_relation(etcd)
configure_master_services()
hookenv.status_set('maintenance', 'Starting the Kubernetes master services.')
services = ['kube-apiserver', 'kube-controller-manager', 'kube-scheduler']
for service in services:
host.service_restart(('snap.%s.daemon' % service))
hookenv.open_port(6443)
set_state('kubernetes-master.components.started') | 704,572,935,404,919,700 | Run the Kubernetes master components. | vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py | start_master | BaiHuoYu/nbp | python | @when('etcd.available', 'tls_client.server.certificate.saved', 'authentication.setup')
@when_not('kubernetes-master.components.started')
def start_master(etcd):
hookenv.status_set('maintenance', 'Configuring the Kubernetes master services.')
freeze_service_cidr()
if (not etcd.get_connection_string()):
return
handle_etcd_relation(etcd)
configure_master_services()
hookenv.status_set('maintenance', 'Starting the Kubernetes master services.')
services = ['kube-apiserver', 'kube-controller-manager', 'kube-scheduler']
for service in services:
host.service_restart(('snap.%s.daemon' % service))
hookenv.open_port(6443)
set_state('kubernetes-master.components.started') |
@when('etcd.available')
def etcd_data_change(etcd):
' Etcd scale events block master reconfiguration due to the\n kubernetes-master.components.started state. We need a way to\n handle these events consistenly only when the number of etcd\n units has actually changed '
connection_string = etcd.get_connection_string()
if data_changed('etcd-connect', connection_string):
remove_state('kubernetes-master.components.started') | 138,625,547,488,238,990 | Etcd scale events block master reconfiguration due to the
kubernetes-master.components.started state. We need a way to
handle these events consistently only when the number of etcd
units has actually changed | vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py | etcd_data_change | BaiHuoYu/nbp | python | @when('etcd.available')
def etcd_data_change(etcd):
' Etcd scale events block master reconfiguration due to the\n kubernetes-master.components.started state. We need a way to\n handle these events consistenly only when the number of etcd\n units has actually changed '
connection_string = etcd.get_connection_string()
if data_changed('etcd-connect', connection_string):
remove_state('kubernetes-master.components.started') |
@when('kube-control.connected')
@when('cdk-addons.configured')
def send_cluster_dns_detail(kube_control):
' Send cluster DNS info '
dns_ip = get_dns_ip()
kube_control.set_dns(53, hookenv.config('dns_domain'), dns_ip) | -5,709,654,663,551,629,000 | Send cluster DNS info | vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py | send_cluster_dns_detail | BaiHuoYu/nbp | python | @when('kube-control.connected')
@when('cdk-addons.configured')
def send_cluster_dns_detail(kube_control):
' '
dns_ip = get_dns_ip()
kube_control.set_dns(53, hookenv.config('dns_domain'), dns_ip) |
@when('kube-control.auth.requested')
@when('authentication.setup')
@when('leadership.is_leader')
def send_tokens(kube_control):
'Send the tokens to the workers.'
kubelet_token = get_token('kubelet')
proxy_token = get_token('kube_proxy')
admin_token = get_token('admin')
requests = kube_control.auth_user()
for request in requests:
kube_control.sign_auth_request(request[0], kubelet_token, proxy_token, admin_token) | -4,402,742,211,720,884,700 | Send the tokens to the workers. | vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py | send_tokens | BaiHuoYu/nbp | python | @when('kube-control.auth.requested')
@when('authentication.setup')
@when('leadership.is_leader')
def send_tokens(kube_control):
kubelet_token = get_token('kubelet')
proxy_token = get_token('kube_proxy')
admin_token = get_token('admin')
requests = kube_control.auth_user()
for request in requests:
kube_control.sign_auth_request(request[0], kubelet_token, proxy_token, admin_token) |
@when_not('kube-control.connected')
def missing_kube_control():
"Inform the operator master is waiting for a relation to workers.\n\n If deploying via bundle this won't happen, but if operator is upgrading a\n a charm in a deployment that pre-dates the kube-control relation, it'll be\n missing.\n\n "
hookenv.status_set('blocked', 'Waiting for workers.') | 1,777,728,462,669,904,100 | Inform the operator master is waiting for a relation to workers.
If deploying via bundle this won't happen, but if operator is upgrading a
a charm in a deployment that pre-dates the kube-control relation, it'll be
missing. | vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py | missing_kube_control | BaiHuoYu/nbp | python | @when_not('kube-control.connected')
def missing_kube_control():
"Inform the operator master is waiting for a relation to workers.\n\n If deploying via bundle this won't happen, but if operator is upgrading a\n a charm in a deployment that pre-dates the kube-control relation, it'll be\n missing.\n\n "
hookenv.status_set('blocked', 'Waiting for workers.') |
@when('kube-api-endpoint.available')
def push_service_data(kube_api):
' Send configuration to the load balancer, and close access to the\n public interface '
kube_api.configure(port=6443) | 5,358,579,529,708,485,000 | Send configuration to the load balancer, and close access to the
public interface | vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py | push_service_data | BaiHuoYu/nbp | python | @when('kube-api-endpoint.available')
def push_service_data(kube_api):
' Send configuration to the load balancer, and close access to the\n public interface '
kube_api.configure(port=6443) |
@when('certificates.available')
def send_data(tls):
'Send the data that is required to create a server certificate for\n this server.'
common_name = hookenv.unit_public_ip()
kubernetes_service_ip = get_kubernetes_service_ip()
domain = hookenv.config('dns_domain')
sans = [hookenv.unit_public_ip(), hookenv.unit_private_ip(), socket.gethostname(), kubernetes_service_ip, 'kubernetes', 'kubernetes.{0}'.format(domain), 'kubernetes.default', 'kubernetes.default.svc', 'kubernetes.default.svc.{0}'.format(domain)]
certificate_name = hookenv.local_unit().replace('/', '_')
tls.request_server_cert(common_name, sans, certificate_name) | 4,849,997,581,079,090,000 | Send the data that is required to create a server certificate for
this server. | vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py | send_data | BaiHuoYu/nbp | python | @when('certificates.available')
def send_data(tls):
'Send the data that is required to create a server certificate for\n this server.'
common_name = hookenv.unit_public_ip()
kubernetes_service_ip = get_kubernetes_service_ip()
domain = hookenv.config('dns_domain')
sans = [hookenv.unit_public_ip(), hookenv.unit_private_ip(), socket.gethostname(), kubernetes_service_ip, 'kubernetes', 'kubernetes.{0}'.format(domain), 'kubernetes.default', 'kubernetes.default.svc', 'kubernetes.default.svc.{0}'.format(domain)]
certificate_name = hookenv.local_unit().replace('/', '_')
tls.request_server_cert(common_name, sans, certificate_name) |
@when('kubernetes-master.components.started')
def configure_cdk_addons():
' Configure CDK addons '
remove_state('cdk-addons.configured')
dbEnabled = str(hookenv.config('enable-dashboard-addons')).lower()
args = [('arch=' + arch()), ('dns-ip=' + get_dns_ip()), ('dns-domain=' + hookenv.config('dns_domain')), ('enable-dashboard=' + dbEnabled)]
check_call((['snap', 'set', 'cdk-addons'] + args))
if (not addons_ready()):
hookenv.status_set('waiting', 'Waiting to retry addon deployment')
remove_state('cdk-addons.configured')
return
set_state('cdk-addons.configured') | -2,867,987,770,483,336,000 | Configure CDK addons | vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py | configure_cdk_addons | BaiHuoYu/nbp | python | @when('kubernetes-master.components.started')
def configure_cdk_addons():
' '
remove_state('cdk-addons.configured')
dbEnabled = str(hookenv.config('enable-dashboard-addons')).lower()
args = [('arch=' + arch()), ('dns-ip=' + get_dns_ip()), ('dns-domain=' + hookenv.config('dns_domain')), ('enable-dashboard=' + dbEnabled)]
check_call((['snap', 'set', 'cdk-addons'] + args))
if (not addons_ready()):
hookenv.status_set('waiting', 'Waiting to retry addon deployment')
remove_state('cdk-addons.configured')
return
set_state('cdk-addons.configured') |
@retry(times=3, delay_secs=20)
def addons_ready():
'\n Test if the add ons got installed\n\n Returns: True is the addons got applied\n\n '
try:
check_call(['cdk-addons.apply'])
return True
except CalledProcessError:
hookenv.log('Addons are not ready yet.')
return False | -7,442,323,682,676,021,000 | Test if the add ons got installed
Returns: True is the addons got applied | vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py | addons_ready | BaiHuoYu/nbp | python | @retry(times=3, delay_secs=20)
def addons_ready():
'\n Test if the add ons got installed\n\n Returns: True is the addons got applied\n\n '
try:
check_call(['cdk-addons.apply'])
return True
except CalledProcessError:
hookenv.log('Addons are not ready yet.')
return False |
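`addons_ready` leans on a `@retry(times=3, delay_secs=20)` decorator that is defined elsewhere in the charm and not shown in this row. A minimal sketch of what such a decorator might look like; the behaviour (retry until a truthy return or the attempts run out, sleeping in between) is an assumption, not the charm's actual implementation:

```python
import time
from functools import wraps

def retry(times=3, delay_secs=20):
    """Call the wrapped function up to `times` times, pausing between attempts."""
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            result = None
            for attempt in range(times):
                result = func(*args, **kwargs)
                if result:                    # truthy result: stop retrying
                    return result
                if attempt < times - 1:
                    time.sleep(delay_secs)
            return result                     # last (falsy) result
        return wrapper
    return decorator
```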
@when('certificates.ca.available', 'certificates.client.cert.available', 'authentication.setup')
@when_not('loadbalancer.available')
def create_self_config(ca, client):
'Create a kubernetes configuration for the master unit.'
server = 'https://{0}:{1}'.format(hookenv.unit_get('public-address'), 6443)
build_kubeconfig(server) | 6,422,452,282,395,083,000 | Create a kubernetes configuration for the master unit. | vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py | create_self_config | BaiHuoYu/nbp | python | @when('certificates.ca.available', 'certificates.client.cert.available', 'authentication.setup')
@when_not('loadbalancer.available')
def create_self_config(ca, client):
server = 'https://{0}:{1}'.format(hookenv.unit_get('public-address'), 6443)
build_kubeconfig(server) |
@when('ceph-storage.available')
def ceph_state_control(ceph_admin):
' Determine if we should remove the state that controls the re-render\n and execution of the ceph-relation-changed event because there\n are changes in the relationship data, and we should re-render any\n configs, keys, and/or service pre-reqs '
ceph_relation_data = {'mon_hosts': ceph_admin.mon_hosts(), 'fsid': ceph_admin.fsid(), 'auth_supported': ceph_admin.auth(), 'hostname': socket.gethostname(), 'key': ceph_admin.key()}
if data_changed('ceph-config', ceph_relation_data):
remove_state('ceph-storage.configured') | 4,012,998,128,674,763,300 | Determine if we should remove the state that controls the re-render
and execution of the ceph-relation-changed event because there
are changes in the relationship data, and we should re-render any
configs, keys, and/or service pre-reqs | vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py | ceph_state_control | BaiHuoYu/nbp | python | @when('ceph-storage.available')
def ceph_state_control(ceph_admin):
' Determine if we should remove the state that controls the re-render\n and execution of the ceph-relation-changed event because there\n are changes in the relationship data, and we should re-render any\n configs, keys, and/or service pre-reqs '
ceph_relation_data = {'mon_hosts': ceph_admin.mon_hosts(), 'fsid': ceph_admin.fsid(), 'auth_supported': ceph_admin.auth(), 'hostname': socket.gethostname(), 'key': ceph_admin.key()}
if data_changed('ceph-config', ceph_relation_data):
remove_state('ceph-storage.configured') |
@when('ceph-storage.available')
@when_not('ceph-storage.configured')
def ceph_storage(ceph_admin):
'Ceph on kubernetes will require a few things - namely a ceph\n configuration, and the ceph secret key file used for authentication.\n This method will install the client package, and render the requisit files\n in order to consume the ceph-storage relation.'
ceph_context = {'mon_hosts': ceph_admin.mon_hosts(), 'fsid': ceph_admin.fsid(), 'auth_supported': ceph_admin.auth(), 'use_syslog': 'true', 'ceph_public_network': '', 'ceph_cluster_network': '', 'loglevel': 1, 'hostname': socket.gethostname()}
apt_install(['ceph-common'], fatal=True)
etc_ceph_directory = '/etc/ceph'
if (not os.path.isdir(etc_ceph_directory)):
os.makedirs(etc_ceph_directory)
charm_ceph_conf = os.path.join(etc_ceph_directory, 'ceph.conf')
render('ceph.conf', charm_ceph_conf, ceph_context)
admin_key = os.path.join(etc_ceph_directory, 'ceph.client.admin.keyring')
try:
with open(admin_key, 'w') as key_file:
key_file.write('[client.admin]\n\tkey = {}\n'.format(ceph_admin.key()))
except IOError as err:
hookenv.log('IOError writing admin.keyring: {}'.format(err))
if ceph_admin.key():
encoded_key = base64.b64encode(ceph_admin.key().encode('utf-8'))
else:
return
context = {'secret': encoded_key.decode('ascii')}
render('ceph-secret.yaml', '/tmp/ceph-secret.yaml', context)
try:
cmd = ['kubectl', 'apply', '-f', '/tmp/ceph-secret.yaml']
check_call(cmd)
os.remove('/tmp/ceph-secret.yaml')
except:
return
set_state('ceph-storage.configured') | -2,433,001,908,601,862,700 | Ceph on kubernetes will require a few things - namely a ceph
configuration, and the ceph secret key file used for authentication.
This method will install the client package, and render the requisite files
in order to consume the ceph-storage relation. | vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py | ceph_storage | BaiHuoYu/nbp | python | @when('ceph-storage.available')
@when_not('ceph-storage.configured')
def ceph_storage(ceph_admin):
'Ceph on kubernetes will require a few things - namely a ceph\n configuration, and the ceph secret key file used for authentication.\n This method will install the client package, and render the requisit files\n in order to consume the ceph-storage relation.'
ceph_context = {'mon_hosts': ceph_admin.mon_hosts(), 'fsid': ceph_admin.fsid(), 'auth_supported': ceph_admin.auth(), 'use_syslog': 'true', 'ceph_public_network': '', 'ceph_cluster_network': '', 'loglevel': 1, 'hostname': socket.gethostname()}
apt_install(['ceph-common'], fatal=True)
etc_ceph_directory = '/etc/ceph'
if (not os.path.isdir(etc_ceph_directory)):
os.makedirs(etc_ceph_directory)
charm_ceph_conf = os.path.join(etc_ceph_directory, 'ceph.conf')
render('ceph.conf', charm_ceph_conf, ceph_context)
admin_key = os.path.join(etc_ceph_directory, 'ceph.client.admin.keyring')
try:
with open(admin_key, 'w') as key_file:
key_file.write('[client.admin]\n\tkey = {}\n'.format(ceph_admin.key()))
except IOError as err:
hookenv.log('IOError writing admin.keyring: {}'.format(err))
if ceph_admin.key():
encoded_key = base64.b64encode(ceph_admin.key().encode('utf-8'))
else:
return
context = {'secret': encoded_key.decode('ascii')}
render('ceph-secret.yaml', '/tmp/ceph-secret.yaml', context)
try:
cmd = ['kubectl', 'apply', '-f', '/tmp/ceph-secret.yaml']
check_call(cmd)
os.remove('/tmp/ceph-secret.yaml')
except:
return
set_state('ceph-storage.configured') |
def is_privileged():
'Return boolean indicating whether or not to set allow-privileged=true.\n\n '
privileged = hookenv.config('allow-privileged')
if (privileged == 'auto'):
return is_state('kubernetes-master.gpu.enabled')
else:
return (privileged == 'true') | -780,240,340,701,585,900 | Return boolean indicating whether or not to set allow-privileged=true. | vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py | is_privileged | BaiHuoYu/nbp | python | def is_privileged():
'\n\n '
privileged = hookenv.config('allow-privileged')
if (privileged == 'auto'):
return is_state('kubernetes-master.gpu.enabled')
else:
return (privileged == 'true') |
@when('config.changed.allow-privileged')
@when('kubernetes-master.components.started')
def on_config_allow_privileged_change():
"React to changed 'allow-privileged' config value.\n\n "
remove_state('kubernetes-master.components.started')
remove_state('config.changed.allow-privileged') | 4,813,052,077,131,673,000 | React to changed 'allow-privileged' config value. | vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py | on_config_allow_privileged_change | BaiHuoYu/nbp | python | @when('config.changed.allow-privileged')
@when('kubernetes-master.components.started')
def on_config_allow_privileged_change():
"\n\n "
remove_state('kubernetes-master.components.started')
remove_state('config.changed.allow-privileged') |
@when('kube-control.gpu.available')
@when('kubernetes-master.components.started')
@when_not('kubernetes-master.gpu.enabled')
def on_gpu_available(kube_control):
'The remote side (kubernetes-worker) is gpu-enabled.\n\n We need to run in privileged mode.\n\n '
config = hookenv.config()
if (config['allow-privileged'] == 'false'):
hookenv.status_set('active', 'GPUs available. Set allow-privileged="auto" to enable.')
return
remove_state('kubernetes-master.components.started')
set_state('kubernetes-master.gpu.enabled') | 4,215,349,961,622,603,300 | The remote side (kubernetes-worker) is gpu-enabled.
We need to run in privileged mode. | vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py | on_gpu_available | BaiHuoYu/nbp | python | @when('kube-control.gpu.available')
@when('kubernetes-master.components.started')
@when_not('kubernetes-master.gpu.enabled')
def on_gpu_available(kube_control):
'The remote side (kubernetes-worker) is gpu-enabled.\n\n We need to run in privileged mode.\n\n '
config = hookenv.config()
if (config['allow-privileged'] == 'false'):
hookenv.status_set('active', 'GPUs available. Set allow-privileged="auto" to enable.')
return
remove_state('kubernetes-master.components.started')
set_state('kubernetes-master.gpu.enabled') |
@when('kubernetes-master.gpu.enabled')
@when_not('kubernetes-master.privileged')
def disable_gpu_mode():
'We were in gpu mode, but the operator has set allow-privileged="false",\n so we can\'t run in gpu mode anymore.\n\n '
remove_state('kubernetes-master.gpu.enabled') | 7,250,460,879,740,247,000 | We were in gpu mode, but the operator has set allow-privileged="false",
so we can't run in gpu mode anymore. | vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py | disable_gpu_mode | BaiHuoYu/nbp | python | @when('kubernetes-master.gpu.enabled')
@when_not('kubernetes-master.privileged')
def disable_gpu_mode():
'We were in gpu mode, but the operator has set allow-privileged="false",\n so we can\'t run in gpu mode anymore.\n\n '
remove_state('kubernetes-master.gpu.enabled') |
@hook('stop')
def shutdown():
' Stop the kubernetes master services\n\n '
service_stop('snap.kube-apiserver.daemon')
service_stop('snap.kube-controller-manager.daemon')
service_stop('snap.kube-scheduler.daemon') | -1,049,840,848,278,248,600 | Stop the kubernetes master services | vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py | shutdown | BaiHuoYu/nbp | python | @hook('stop')
def shutdown():
' \n\n '
service_stop('snap.kube-apiserver.daemon')
service_stop('snap.kube-controller-manager.daemon')
service_stop('snap.kube-scheduler.daemon') |
def arch():
'Return the package architecture as a string. Raise an exception if the\n architecture is not supported by kubernetes.'
architecture = check_output(['dpkg', '--print-architecture']).rstrip()
architecture = architecture.decode('utf-8')
return architecture | 7,777,717,789,895,950,000 | Return the package architecture as a string. Raise an exception if the
architecture is not supported by kubernetes. | vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py | arch | BaiHuoYu/nbp | python | def arch():
'Return the package architecture as a string. Raise an exception if the\n architecture is not supported by kubernetes.'
architecture = check_output(['dpkg', '--print-architecture']).rstrip()
architecture = architecture.decode('utf-8')
return architecture |
def build_kubeconfig(server):
'Gather the relevant data for Kubernetes configuration objects and create\n a config object with that information.'
layer_options = layer.options('tls-client')
ca = layer_options.get('ca_certificate_path')
ca_exists = (ca and os.path.isfile(ca))
client_pass = get_password('basic_auth.csv', 'admin')
if (ca_exists and client_pass):
kubeconfig_path = os.path.join(os.sep, 'home', 'ubuntu', 'config')
create_kubeconfig(kubeconfig_path, server, ca, user='admin', password=client_pass)
cmd = ['chown', 'ubuntu:ubuntu', kubeconfig_path]
check_call(cmd) | -2,934,579,074,449,449,000 | Gather the relevant data for Kubernetes configuration objects and create
a config object with that information. | vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py | build_kubeconfig | BaiHuoYu/nbp | python | def build_kubeconfig(server):
'Gather the relevant data for Kubernetes configuration objects and create\n a config object with that information.'
layer_options = layer.options('tls-client')
ca = layer_options.get('ca_certificate_path')
ca_exists = (ca and os.path.isfile(ca))
client_pass = get_password('basic_auth.csv', 'admin')
if (ca_exists and client_pass):
kubeconfig_path = os.path.join(os.sep, 'home', 'ubuntu', 'config')
create_kubeconfig(kubeconfig_path, server, ca, user='admin', password=client_pass)
cmd = ['chown', 'ubuntu:ubuntu', kubeconfig_path]
check_call(cmd) |
def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None, user='ubuntu', context='juju-context', cluster='juju-cluster', password=None, token=None):
'Create a configuration for Kubernetes based on path using the supplied\n arguments for values of the Kubernetes server, CA, key, certificate, user\n context and cluster.'
if ((not key) and (not certificate) and (not password) and (not token)):
raise ValueError('Missing authentication mechanism.')
if (token and password):
raise ValueError('Token and Password are mutually exclusive.')
cmd = 'kubectl config --kubeconfig={0} set-cluster {1} --server={2} --certificate-authority={3} --embed-certs=true'
check_call(split(cmd.format(kubeconfig, cluster, server, ca)))
cmd = 'kubectl config --kubeconfig={0} unset users'
check_call(split(cmd.format(kubeconfig)))
cmd = 'kubectl config --kubeconfig={0} set-credentials {1} '.format(kubeconfig, user)
if (key and certificate):
cmd = '{0} --client-key={1} --client-certificate={2} --embed-certs=true'.format(cmd, key, certificate)
if password:
cmd = '{0} --username={1} --password={2}'.format(cmd, user, password)
if token:
cmd = '{0} --token={1}'.format(cmd, token)
check_call(split(cmd))
cmd = 'kubectl config --kubeconfig={0} set-context {1} --cluster={2} --user={3}'
check_call(split(cmd.format(kubeconfig, context, cluster, user)))
cmd = 'kubectl config --kubeconfig={0} use-context {1}'
check_call(split(cmd.format(kubeconfig, context))) | -2,665,510,102,998,262,000 | Create a configuration for Kubernetes based on path using the supplied
arguments for values of the Kubernetes server, CA, key, certificate, user
context and cluster. | vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py | create_kubeconfig | BaiHuoYu/nbp | python | def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None, user='ubuntu', context='juju-context', cluster='juju-cluster', password=None, token=None):
'Create a configuration for Kubernetes based on path using the supplied\n arguments for values of the Kubernetes server, CA, key, certificate, user\n context and cluster.'
if ((not key) and (not certificate) and (not password) and (not token)):
raise ValueError('Missing authentication mechanism.')
if (token and password):
raise ValueError('Token and Password are mutually exclusive.')
cmd = 'kubectl config --kubeconfig={0} set-cluster {1} --server={2} --certificate-authority={3} --embed-certs=true'
check_call(split(cmd.format(kubeconfig, cluster, server, ca)))
cmd = 'kubectl config --kubeconfig={0} unset users'
check_call(split(cmd.format(kubeconfig)))
cmd = 'kubectl config --kubeconfig={0} set-credentials {1} '.format(kubeconfig, user)
if (key and certificate):
cmd = '{0} --client-key={1} --client-certificate={2} --embed-certs=true'.format(cmd, key, certificate)
if password:
cmd = '{0} --username={1} --password={2}'.format(cmd, user, password)
if token:
cmd = '{0} --token={1}'.format(cmd, token)
check_call(split(cmd))
cmd = 'kubectl config --kubeconfig={0} set-context {1} --cluster={2} --user={3}'
check_call(split(cmd.format(kubeconfig, context, cluster, user)))
cmd = 'kubectl config --kubeconfig={0} use-context {1}'
check_call(split(cmd.format(kubeconfig, context))) |
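
A minimal, hypothetical call of the create_kubeconfig() helper above; it assumes the function is importable and kubectl is on PATH, and every path, address and token below is a placeholder, not a value from the charm.

create_kubeconfig(
    kubeconfig='/home/ubuntu/config',       # placeholder output path
    server='https://192.168.0.10:6443',     # placeholder API server address
    ca='/root/cdk/ca.crt',                  # placeholder CA certificate path
    user='admin',
    token='0123456789abcdef',               # token and password are mutually exclusive
)
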
def get_dns_ip():
'Get an IP address for the DNS server on the provided cidr.'
interface = ipaddress.IPv4Interface(service_cidr())
ip = (interface.network.network_address + 10)
return ip.exploded | 5,212,188,719,946,503,000 | Get an IP address for the DNS server on the provided cidr. | vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py | get_dns_ip | BaiHuoYu/nbp | python | def get_dns_ip():
interface = ipaddress.IPv4Interface(service_cidr())
ip = (interface.network.network_address + 10)
return ip.exploded |
def get_kubernetes_service_ip():
'Get the IP address for the kubernetes service based on the cidr.'
interface = ipaddress.IPv4Interface(service_cidr())
ip = (interface.network.network_address + 1)
return ip.exploded | 4,044,658,461,190,373,000 | Get the IP address for the kubernetes service based on the cidr. | vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py | get_kubernetes_service_ip | BaiHuoYu/nbp | python | def get_kubernetes_service_ip():
interface = ipaddress.IPv4Interface(service_cidr())
ip = (interface.network.network_address + 1)
return ip.exploded |
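
Both helpers above offset the network address of the service CIDR to pick well-known addresses. A self-contained sketch of that arithmetic, with an assumed example CIDR:

import ipaddress

service_cidr = '10.152.183.0/24'                       # assumed example value
interface = ipaddress.IPv4Interface(service_cidr)
kubernetes_service_ip = (interface.network.network_address + 1).exploded
dns_ip = (interface.network.network_address + 10).exploded
print(kubernetes_service_ip, dns_ip)                   # 10.152.183.1 10.152.183.10
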
def handle_etcd_relation(reldata):
' Save the client credentials and set appropriate daemon flags when\n etcd declares itself as available'
connection_string = reldata.get_connection_string()
etcd_dir = '/root/cdk/etcd'
ca = os.path.join(etcd_dir, 'client-ca.pem')
key = os.path.join(etcd_dir, 'client-key.pem')
cert = os.path.join(etcd_dir, 'client-cert.pem')
reldata.save_client_credentials(key, cert, ca)
api_opts = FlagManager('kube-apiserver')
data = api_opts.data
if (data.get('etcd-servers-strict') or data.get('etcd-servers')):
api_opts.destroy('etcd-cafile')
api_opts.destroy('etcd-keyfile')
api_opts.destroy('etcd-certfile')
api_opts.destroy('etcd-servers', strict=True)
api_opts.destroy('etcd-servers')
api_opts.add('etcd-cafile', ca)
api_opts.add('etcd-keyfile', key)
api_opts.add('etcd-certfile', cert)
api_opts.add('etcd-servers', connection_string, strict=True) | -8,894,728,876,959,341,000 | Save the client credentials and set appropriate daemon flags when
etcd declares itself as available | vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py | handle_etcd_relation | BaiHuoYu/nbp | python | def handle_etcd_relation(reldata):
' Save the client credentials and set appropriate daemon flags when\n etcd declares itself as available'
connection_string = reldata.get_connection_string()
etcd_dir = '/root/cdk/etcd'
ca = os.path.join(etcd_dir, 'client-ca.pem')
key = os.path.join(etcd_dir, 'client-key.pem')
cert = os.path.join(etcd_dir, 'client-cert.pem')
reldata.save_client_credentials(key, cert, ca)
api_opts = FlagManager('kube-apiserver')
data = api_opts.data
if (data.get('etcd-servers-strict') or data.get('etcd-servers')):
api_opts.destroy('etcd-cafile')
api_opts.destroy('etcd-keyfile')
api_opts.destroy('etcd-certfile')
api_opts.destroy('etcd-servers', strict=True)
api_opts.destroy('etcd-servers')
api_opts.add('etcd-cafile', ca)
api_opts.add('etcd-keyfile', key)
api_opts.add('etcd-certfile', cert)
api_opts.add('etcd-servers', connection_string, strict=True) |
def configure_master_services():
' Add remaining flags for the master services and configure snaps to use\n them '
api_opts = FlagManager('kube-apiserver')
controller_opts = FlagManager('kube-controller-manager')
scheduler_opts = FlagManager('kube-scheduler')
scheduler_opts.add('v', '2')
layer_options = layer.options('tls-client')
ca_cert_path = layer_options.get('ca_certificate_path')
client_cert_path = layer_options.get('client_certificate_path')
client_key_path = layer_options.get('client_key_path')
server_cert_path = layer_options.get('server_certificate_path')
server_key_path = layer_options.get('server_key_path')
if is_privileged():
api_opts.add('allow-privileged', 'true', strict=True)
set_state('kubernetes-master.privileged')
else:
api_opts.add('allow-privileged', 'false', strict=True)
remove_state('kubernetes-master.privileged')
api_opts.add('service-cluster-ip-range', service_cidr())
api_opts.add('min-request-timeout', '300')
api_opts.add('v', '4')
api_opts.add('tls-cert-file', server_cert_path)
api_opts.add('tls-private-key-file', server_key_path)
api_opts.add('kubelet-certificate-authority', ca_cert_path)
api_opts.add('kubelet-client-certificate', client_cert_path)
api_opts.add('kubelet-client-key', client_key_path)
api_opts.add('logtostderr', 'true')
api_opts.add('insecure-bind-address', '127.0.0.1')
api_opts.add('insecure-port', '8080')
api_opts.add('storage-backend', 'etcd2')
admission_control = ['Initializers', 'NamespaceLifecycle', 'LimitRanger', 'ServiceAccount', 'ResourceQuota', 'DefaultTolerationSeconds']
if (get_version('kube-apiserver') < (1, 6)):
hookenv.log('Removing DefaultTolerationSeconds from admission-control')
admission_control.remove('DefaultTolerationSeconds')
if (get_version('kube-apiserver') < (1, 7)):
hookenv.log('Removing Initializers from admission-control')
admission_control.remove('Initializers')
api_opts.add('admission-control', ','.join(admission_control), strict=True)
controller_opts.add('min-resync-period', '3m')
controller_opts.add('v', '2')
controller_opts.add('root-ca-file', ca_cert_path)
controller_opts.add('logtostderr', 'true')
controller_opts.add('master', 'http://127.0.0.1:8080')
scheduler_opts.add('v', '2')
scheduler_opts.add('logtostderr', 'true')
scheduler_opts.add('master', 'http://127.0.0.1:8080')
cmd = (['snap', 'set', 'kube-apiserver'] + api_opts.to_s().split(' '))
check_call(cmd)
cmd = (['snap', 'set', 'kube-controller-manager'] + controller_opts.to_s().split(' '))
check_call(cmd)
cmd = (['snap', 'set', 'kube-scheduler'] + scheduler_opts.to_s().split(' '))
check_call(cmd) | 1,320,986,970,023,214,800 | Add remaining flags for the master services and configure snaps to use
them | vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py | configure_master_services | BaiHuoYu/nbp | python | def configure_master_services():
' Add remaining flags for the master services and configure snaps to use\n them '
api_opts = FlagManager('kube-apiserver')
controller_opts = FlagManager('kube-controller-manager')
scheduler_opts = FlagManager('kube-scheduler')
scheduler_opts.add('v', '2')
layer_options = layer.options('tls-client')
ca_cert_path = layer_options.get('ca_certificate_path')
client_cert_path = layer_options.get('client_certificate_path')
client_key_path = layer_options.get('client_key_path')
server_cert_path = layer_options.get('server_certificate_path')
server_key_path = layer_options.get('server_key_path')
if is_privileged():
api_opts.add('allow-privileged', 'true', strict=True)
set_state('kubernetes-master.privileged')
else:
api_opts.add('allow-privileged', 'false', strict=True)
remove_state('kubernetes-master.privileged')
api_opts.add('service-cluster-ip-range', service_cidr())
api_opts.add('min-request-timeout', '300')
api_opts.add('v', '4')
api_opts.add('tls-cert-file', server_cert_path)
api_opts.add('tls-private-key-file', server_key_path)
api_opts.add('kubelet-certificate-authority', ca_cert_path)
api_opts.add('kubelet-client-certificate', client_cert_path)
api_opts.add('kubelet-client-key', client_key_path)
api_opts.add('logtostderr', 'true')
api_opts.add('insecure-bind-address', '127.0.0.1')
api_opts.add('insecure-port', '8080')
api_opts.add('storage-backend', 'etcd2')
admission_control = ['Initializers', 'NamespaceLifecycle', 'LimitRanger', 'ServiceAccount', 'ResourceQuota', 'DefaultTolerationSeconds']
if (get_version('kube-apiserver') < (1, 6)):
hookenv.log('Removing DefaultTolerationSeconds from admission-control')
admission_control.remove('DefaultTolerationSeconds')
if (get_version('kube-apiserver') < (1, 7)):
hookenv.log('Removing Initializers from admission-control')
admission_control.remove('Initializers')
api_opts.add('admission-control', ','.join(admission_control), strict=True)
controller_opts.add('min-resync-period', '3m')
controller_opts.add('v', '2')
controller_opts.add('root-ca-file', ca_cert_path)
controller_opts.add('logtostderr', 'true')
controller_opts.add('master', 'http://127.0.0.1:8080')
scheduler_opts.add('v', '2')
scheduler_opts.add('logtostderr', 'true')
scheduler_opts.add('master', 'http://127.0.0.1:8080')
cmd = (['snap', 'set', 'kube-apiserver'] + api_opts.to_s().split(' '))
check_call(cmd)
cmd = (['snap', 'set', 'kube-controller-manager'] + controller_opts.to_s().split(' '))
check_call(cmd)
cmd = (['snap', 'set', 'kube-scheduler'] + scheduler_opts.to_s().split(' '))
check_call(cmd) |
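
The admission-control list above is trimmed according to the kube-apiserver version. A self-contained sketch of that gating, with a hard-coded example version tuple standing in for the charm's get_version() helper:

kube_apiserver_version = (1, 5)                        # assumed example version
admission_control = ['Initializers', 'NamespaceLifecycle', 'LimitRanger',
                     'ServiceAccount', 'ResourceQuota', 'DefaultTolerationSeconds']
if kube_apiserver_version < (1, 6):
    admission_control.remove('DefaultTolerationSeconds')
if kube_apiserver_version < (1, 7):
    admission_control.remove('Initializers')
print(','.join(admission_control))                     # NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota
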
def setup_basic_auth(password=None, username='admin', uid='admin'):
'Create the htaccess file and the tokens.'
root_cdk = '/root/cdk'
if (not os.path.isdir(root_cdk)):
os.makedirs(root_cdk)
htaccess = os.path.join(root_cdk, 'basic_auth.csv')
if (not password):
password = token_generator()
with open(htaccess, 'w') as stream:
stream.write('{0},{1},{2}'.format(password, username, uid)) | 3,896,753,039,689,212,000 | Create the htaccess file and the tokens. | vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py | setup_basic_auth | BaiHuoYu/nbp | python | def setup_basic_auth(password=None, username='admin', uid='admin'):
root_cdk = '/root/cdk'
if (not os.path.isdir(root_cdk)):
os.makedirs(root_cdk)
htaccess = os.path.join(root_cdk, 'basic_auth.csv')
if (not password):
password = token_generator()
with open(htaccess, 'w') as stream:
stream.write('{0},{1},{2}'.format(password, username, uid)) |
def setup_tokens(token, username, user):
'Create a token file for kubernetes authentication.'
root_cdk = '/root/cdk'
if (not os.path.isdir(root_cdk)):
os.makedirs(root_cdk)
known_tokens = os.path.join(root_cdk, 'known_tokens.csv')
if (not token):
token = token_generator()
with open(known_tokens, 'a') as stream:
stream.write('{0},{1},{2}\n'.format(token, username, user)) | -5,288,888,162,399,618,000 | Create a token file for kubernetes authentication. | vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py | setup_tokens | BaiHuoYu/nbp | python | def setup_tokens(token, username, user):
root_cdk = '/root/cdk'
if (not os.path.isdir(root_cdk)):
os.makedirs(root_cdk)
known_tokens = os.path.join(root_cdk, 'known_tokens.csv')
if (not token):
token = token_generator()
with open(known_tokens, 'a') as stream:
stream.write('{0},{1},{2}\n'.format(token, username, user)) |
def get_password(csv_fname, user):
'Get the password of user within the csv file provided.'
root_cdk = '/root/cdk'
tokens_fname = os.path.join(root_cdk, csv_fname)
if (not os.path.isfile(tokens_fname)):
return None
with open(tokens_fname, 'r') as stream:
for line in stream:
record = line.split(',')
if (record[1] == user):
return record[0]
return None | 1,101,407,316,263,802,400 | Get the password of user within the csv file provided. | vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py | get_password | BaiHuoYu/nbp | python | def get_password(csv_fname, user):
root_cdk = '/root/cdk'
tokens_fname = os.path.join(root_cdk, csv_fname)
if (not os.path.isfile(tokens_fname)):
return None
with open(tokens_fname, 'r') as stream:
for line in stream:
record = line.split(',')
if (record[1] == user):
return record[0]
return None |
def get_token(username):
'Grab a token from the static file if present. '
return get_password('known_tokens.csv', username) | 2,882,756,315,456,273,400 | Grab a token from the static file if present. | vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py | get_token | BaiHuoYu/nbp | python | def get_token(username):
' '
return get_password('known_tokens.csv', username) |
def set_token(password, save_salt):
' Store a token so it can be recalled later by token_generator.\n\n param: password - the password to be stored\n param: save_salt - the key to store the value of the token.'
db = unitdata.kv()
db.set(save_salt, password)
return db.get(save_salt) | 7,914,817,382,534,536,000 | Store a token so it can be recalled later by token_generator.
param: password - the password to be stored
param: save_salt - the key to store the value of the token. | vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py | set_token | BaiHuoYu/nbp | python | def set_token(password, save_salt):
' Store a token so it can be recalled later by token_generator.\n\n param: password - the password to be stored\n param: save_salt - the key to store the value of the token.'
db = unitdata.kv()
db.set(save_salt, password)
return db.get(save_salt) |
def token_generator(length=32):
' Generate a random token for use in passwords and account tokens.\n\n param: length - the length of the token to generate'
alpha = (string.ascii_letters + string.digits)
token = ''.join((random.SystemRandom().choice(alpha) for _ in range(length)))
return token | 4,775,048,515,420,518,000 | Generate a random token for use in passwords and account tokens.
param: length - the length of the token to generate | vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py | token_generator | BaiHuoYu/nbp | python | def token_generator(length=32):
' Generate a random token for use in passwords and account tokens.\n\n param: length - the length of the token to generate'
alpha = (string.ascii_letters + string.digits)
token = .join((random.SystemRandom().choice(alpha) for _ in range(length)))
return token |
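
A self-contained sketch of the known_tokens.csv round trip implemented by setup_tokens() and get_password() above, writing to a temporary directory instead of /root/cdk (an assumption made purely for illustration):

import os
import random
import string
import tempfile

def token_generator(length=32):                         # same approach as above
    alpha = string.ascii_letters + string.digits
    return ''.join(random.SystemRandom().choice(alpha) for _ in range(length))

root = tempfile.mkdtemp()
known_tokens = os.path.join(root, 'known_tokens.csv')
token = token_generator()
with open(known_tokens, 'a') as stream:                 # one record per user, as in setup_tokens()
    stream.write('{0},{1},{2}\n'.format(token, 'admin', 'admin'))
with open(known_tokens) as stream:                      # lookup mirrors get_password()
    for line in stream:
        record = line.split(',')
        if record[1] == 'admin':
            print('token for admin:', record[0])
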
@retry(times=3, delay_secs=10)
def all_kube_system_pods_running():
' Check pod status in the kube-system namespace. Returns True if all\n pods are running, False otherwise. '
cmd = ['kubectl', 'get', 'po', '-n', 'kube-system', '-o', 'json']
try:
output = check_output(cmd).decode('utf-8')
except CalledProcessError:
hookenv.log('failed to get kube-system pod status')
return False
result = json.loads(output)
for pod in result['items']:
status = pod['status']['phase']
if (status != 'Running'):
return False
return True | -5,931,451,774,860,304,000 | Check pod status in the kube-system namespace. Returns True if all
pods are running, False otherwise. | vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py | all_kube_system_pods_running | BaiHuoYu/nbp | python | @retry(times=3, delay_secs=10)
def all_kube_system_pods_running():
' Check pod status in the kube-system namespace. Returns True if all\n pods are running, False otherwise. '
cmd = ['kubectl', 'get', 'po', '-n', 'kube-system', '-o', 'json']
try:
output = check_output(cmd).decode('utf-8')
except CalledProcessError:
hookenv.log('failed to get kube-system pod status')
return False
result = json.loads(output)
for pod in result['items']:
status = pod['status']['phase']
if (status != 'Running'):
return False
return True |
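
A sketch of the pod-status check above applied to a canned JSON payload instead of a live kubectl call; the sample data is invented for illustration.

import json

output = '{"items": [{"status": {"phase": "Running"}}, {"status": {"phase": "Pending"}}]}'
result = json.loads(output)
all_running = all(pod['status']['phase'] == 'Running' for pod in result['items'])
print(all_running)                                      # False: one pod is still Pending
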
@maybe_login_required
def get(self):
'\n ---\n description: Get a list of commits.\n responses:\n "200": "CommitList"\n "401": "401"\n tags:\n - Commits\n '
commits = Commit.all(order_by=Commit.timestamp.desc(), limit=500)
return self.serializer.many.dump(commits) | 2,398,455,464,862,368,000 | ---
description: Get a list of commits.
responses:
"200": "CommitList"
"401": "401"
tags:
- Commits | conbench/api/commits.py | get | Christian8491/conbench | python | @maybe_login_required
def get(self):
'\n ---\n description: Get a list of commits.\n responses:\n "200": "CommitList"\n "401": "401"\n tags:\n - Commits\n '
commits = Commit.all(order_by=Commit.timestamp.desc(), limit=500)
return self.serializer.many.dump(commits) |
@maybe_login_required
def get(self, commit_id):
'\n ---\n description: Get a commit.\n responses:\n "200": "CommitEntity"\n "401": "401"\n "404": "404"\n parameters:\n - name: commit_id\n in: path\n schema:\n type: string\n tags:\n - Commits\n '
commit = self._get(commit_id)
return self.serializer.one.dump(commit) | -4,298,231,717,111,611,400 | ---
description: Get a commit.
responses:
"200": "CommitEntity"
"401": "401"
"404": "404"
parameters:
- name: commit_id
in: path
schema:
type: string
tags:
- Commits | conbench/api/commits.py | get | Christian8491/conbench | python | @maybe_login_required
def get(self, commit_id):
'\n ---\n description: Get a commit.\n responses:\n "200": "CommitEntity"\n "401": "401"\n "404": "404"\n parameters:\n - name: commit_id\n in: path\n schema:\n type: string\n tags:\n - Commits\n '
commit = self._get(commit_id)
return self.serializer.one.dump(commit) |
def __init__(self, basedir=None, **kwargs):
' Constructor '
self.basedir = basedir | 4,008,232,155,774,888,000 | Constructor | lookup_plugins/oo_option.py | __init__ | Acidburn0zzz/openshift-ansible | python | def __init__(self, basedir=None, **kwargs):
' '
self.basedir = basedir |
def run(self, terms, variables, **kwargs):
' Main execution path '
ret = []
for term in terms:
option_name = term.split()[0]
cli_key = ('cli_' + option_name)
if (('vars' in variables) and (cli_key in variables['vars'])):
ret.append(variables['vars'][cli_key])
elif (option_name in os.environ):
ret.append(os.environ[option_name])
else:
ret.append('')
return ret | -6,311,369,496,282,909,000 | Main execution path | lookup_plugins/oo_option.py | run | Acidburn0zzz/openshift-ansible | python | def run(self, terms, variables, **kwargs):
' '
ret = []
for term in terms:
option_name = term.split()[0]
cli_key = ('cli_' + option_name)
if (('vars' in variables) and (cli_key in variables['vars'])):
ret.append(variables['vars'][cli_key])
elif (option_name in os.environ):
ret.append(os.environ[option_name])
else:
ret.append()
return ret |
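
A self-contained sketch of the precedence implemented by the lookup plugin above: a cli_<option> Ansible variable wins over an environment variable of the same name, and a missing option yields an empty string. All names and values here are invented.

import os

variables = {'vars': {'cli_deployment_type': 'origin'}}   # assumed inventory variables
os.environ['deployment_type'] = 'enterprise'              # assumed environment fallback

def lookup_option(name):
    cli_key = 'cli_' + name
    if 'vars' in variables and cli_key in variables['vars']:
        return variables['vars'][cli_key]
    if name in os.environ:
        return os.environ[name]
    return ''

print(lookup_option('deployment_type'))                   # origin
print(lookup_option('unset_option'))                      # empty string
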
def download_file(url, data_path='.', filename=None, size=None, chunk_size=4096, verbose=True):
'Uses stream=True and a reasonable chunk size to be able to download large (GB) files over https'
if (filename is None):
filename = dropbox_basename(url)
file_path = os.path.join(data_path, filename)
if url.endswith('?dl=0'):
url = (url[:(- 1)] + '1')
if verbose:
tqdm_prog = tqdm
print('requesting URL: {}'.format(url))
else:
tqdm_prog = no_tqdm
r = requests.get(url, stream=True, allow_redirects=True)
size = (r.headers.get('Content-Length', None) if (size is None) else size)
print('remote size: {}'.format(size))
stat = path_status(file_path)
print('local size: {}'.format(stat.get('size', None)))
if ((stat['type'] == 'file') and (stat['size'] == size)):
r.close()
return file_path
print('Downloading to {}'.format(file_path))
with open(file_path, 'wb') as f:
for chunk in r.iter_content(chunk_size=chunk_size):
if chunk:
f.write(chunk)
r.close()
return file_path | 2,216,735,184,912,804,000 | Uses stream=True and a reasonable chunk size to be able to download large (GB) files over https | nlpia/book/examples/ch09.py | download_file | brusic/nlpia | python | def download_file(url, data_path='.', filename=None, size=None, chunk_size=4096, verbose=True):
if (filename is None):
filename = dropbox_basename(url)
file_path = os.path.join(data_path, filename)
if url.endswith('?dl=0'):
url = (url[:(- 1)] + '1')
if verbose:
tqdm_prog = tqdm
print('requesting URL: {}'.format(url))
else:
tqdm_prog = no_tqdm
r = requests.get(url, stream=True, allow_redirects=True)
size = (r.headers.get('Content-Length', None) if (size is None) else size)
print('remote size: {}'.format(size))
stat = path_status(file_path)
print('local size: {}'.format(stat.get('size', None)))
if ((stat['type'] == 'file') and (stat['size'] == size)):
r.close()
return file_path
print('Downloading to {}'.format(file_path))
with open(file_path, 'wb') as f:
for chunk in r.iter_content(chunk_size=chunk_size):
if chunk:
f.write(chunk)
r.close()
return file_path |
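
A minimal sketch of the streamed-download pattern used by download_file() above; the URL and target filename are placeholders, and the size/caching checks are omitted.

import requests

url = 'https://example.com/big_file.bin'                 # placeholder URL
r = requests.get(url, stream=True, allow_redirects=True)
with open('big_file.bin', 'wb') as f:
    for chunk in r.iter_content(chunk_size=4096):         # write in modest chunks
        if chunk:
            f.write(chunk)
r.close()
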
def pre_process_data(filepath):
'\n This is dependent on your training data source but we will try to generalize it as best as possible.\n '
positive_path = os.path.join(filepath, 'pos')
negative_path = os.path.join(filepath, 'neg')
pos_label = 1
neg_label = 0
dataset = []
for filename in glob.glob(os.path.join(positive_path, '*.txt')):
with open(filename, 'r') as f:
dataset.append((pos_label, f.read()))
for filename in glob.glob(os.path.join(negative_path, '*.txt')):
with open(filename, 'r') as f:
dataset.append((neg_label, f.read()))
shuffle(dataset)
return dataset | -6,618,729,490,272,597,000 | This is dependent on your training data source but we will try to generalize it as best as possible. | nlpia/book/examples/ch09.py | pre_process_data | brusic/nlpia | python | def pre_process_data(filepath):
'\n \n '
positive_path = os.path.join(filepath, 'pos')
negative_path = os.path.join(filepath, 'neg')
pos_label = 1
neg_label = 0
dataset = []
for filename in glob.glob(os.path.join(positive_path, '*.txt')):
with open(filename, 'r') as f:
dataset.append((pos_label, f.read()))
for filename in glob.glob(os.path.join(negative_path, '*.txt')):
with open(filename, 'r') as f:
dataset.append((neg_label, f.read()))
shuffle(dataset)
return dataset |
def collect_expected(dataset):
' Peel off the target values from the dataset '
expected = []
for sample in dataset:
expected.append(sample[0])
return expected | -2,978,209,738,048,376,000 | Peel off the target values from the dataset | nlpia/book/examples/ch09.py | collect_expected | brusic/nlpia | python | def collect_expected(dataset):
' '
expected = []
for sample in dataset:
expected.append(sample[0])
return expected |
def pad_trunc(data, maxlen):
' For a given dataset pad with zero vectors or truncate to maxlen '
new_data = []
zero_vector = []
for _ in range(len(data[0][0])):
zero_vector.append(0.0)
for sample in data:
if (len(sample) > maxlen):
temp = sample[:maxlen]
elif (len(sample) < maxlen):
temp = sample
additional_elems = (maxlen - len(sample))
for _ in range(additional_elems):
temp.append(zero_vector)
else:
temp = sample
new_data.append(temp)
return new_data | -2,545,233,103,941,332,500 | For a given dataset pad with zero vectors or truncate to maxlen | nlpia/book/examples/ch09.py | pad_trunc | brusic/nlpia | python | def pad_trunc(data, maxlen):
' '
new_data = []
zero_vector = []
for _ in range(len(data[0][0])):
zero_vector.append(0.0)
for sample in data:
if (len(sample) > maxlen):
temp = sample[:maxlen]
elif (len(sample) < maxlen):
temp = sample
additional_elems = (maxlen - len(sample))
for _ in range(additional_elems):
temp.append(zero_vector)
else:
temp = sample
new_data.append(temp)
return new_data |
def clean_data(data):
' Shift to lower case, replace unknowns with UNK, and listify '
new_data = []
VALID = 'abcdefghijklmnopqrstuvwxyz123456789"\'?!.,:; '
for sample in data:
new_sample = []
for char in sample[1].lower():
if (char in VALID):
new_sample.append(char)
else:
new_sample.append('UNK')
new_data.append(new_sample)
return new_data | -5,663,162,335,939,279,000 | Shift to lower case, replace unknowns with UNK, and listify | nlpia/book/examples/ch09.py | clean_data | brusic/nlpia | python | def clean_data(data):
' '
new_data = []
VALID = 'abcdefghijklmnopqrstuvwxyz123456789"\'?!.,:; '
for sample in data:
new_sample = []
for char in sample[1].lower():
if (char in VALID):
new_sample.append(char)
else:
new_sample.append('UNK')
new_data.append(new_sample)
return new_data |
def char_pad_trunc(data, maxlen):
' We truncate to maxlen or add in PAD tokens '
new_dataset = []
for sample in data:
if (len(sample) > maxlen):
new_data = sample[:maxlen]
elif (len(sample) < maxlen):
pads = (maxlen - len(sample))
new_data = (sample + (['PAD'] * pads))
else:
new_data = sample
new_dataset.append(new_data)
return new_dataset | -6,277,311,333,360,395,000 | We truncate to maxlen or add in PAD tokens | nlpia/book/examples/ch09.py | char_pad_trunc | brusic/nlpia | python | def char_pad_trunc(data, maxlen):
' '
new_dataset = []
for sample in data:
if (len(sample) > maxlen):
new_data = sample[:maxlen]
elif (len(sample) < maxlen):
pads = (maxlen - len(sample))
new_data = (sample + (['PAD'] * pads))
else:
new_data = sample
new_dataset.append(new_data)
return new_dataset |
def create_dicts(data):
' Modified from Keras LSTM example'
chars = set()
for sample in data:
chars.update(set(sample))
char_indices = dict(((c, i) for (i, c) in enumerate(chars)))
indices_char = dict(((i, c) for (i, c) in enumerate(chars)))
return (char_indices, indices_char) | 89,649,798,061,459,300 | Modified from Keras LSTM example | nlpia/book/examples/ch09.py | create_dicts | brusic/nlpia | python | def create_dicts(data):
' '
chars = set()
for sample in data:
chars.update(set(sample))
char_indices = dict(((c, i) for (i, c) in enumerate(chars)))
indices_char = dict(((i, c) for (i, c) in enumerate(chars)))
return (char_indices, indices_char) |
def onehot_encode(dataset, char_indices, maxlen):
' \n One hot encode the tokens\n \n Args:\n dataset list of lists of tokens\n char_indices dictionary of {key=character, value=index to use encoding vector}\n maxlen int Length of each sample\n Return:\n np array of shape (samples, tokens, encoding length)\n '
X = np.zeros((len(dataset), maxlen, len(char_indices.keys())))
for (i, sentence) in enumerate(dataset):
for (t, char) in enumerate(sentence):
X[(i, t, char_indices[char])] = 1
return X | 1,302,900,060,204,858,600 | One hot encode the tokens
Args:
dataset list of lists of tokens
char_indices dictionary of {key=character, value=index to use encoding vector}
maxlen int Length of each sample
Return:
np array of shape (samples, tokens, encoding length) | nlpia/book/examples/ch09.py | onehot_encode | brusic/nlpia | python | def onehot_encode(dataset, char_indices, maxlen):
' \n One hot encode the tokens\n \n Args:\n dataset list of lists of tokens\n char_indices dictionary of {key=character, value=index to use encoding vector}\n maxlen int Length of each sample\n Return:\n np array of shape (samples, tokens, encoding length)\n '
X = np.zeros((len(dataset), maxlen, len(char_indices.keys())))
for (i, sentence) in enumerate(dataset):
for (t, char) in enumerate(sentence):
X[(i, t, char_indices[char])] = 1
return X |
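
A toy, self-contained illustration of the char_pad_trunc() / create_dicts() / onehot_encode() steps above, using an invented two-sample dataset and maxlen=6:

import numpy as np

dataset = [list('hi!'), list('hello')]                    # invented toy samples
maxlen = 6
padded = [(s + ['PAD'] * (maxlen - len(s)))[:maxlen] for s in dataset]    # char_pad_trunc
chars = sorted({c for sample in padded for c in sample})
char_indices = {c: i for i, c in enumerate(chars)}                         # create_dicts
X = np.zeros((len(padded), maxlen, len(char_indices)))
for i, sentence in enumerate(padded):
    for t, char in enumerate(sentence):
        X[i, t, char_indices[char]] = 1                                    # onehot_encode
print(X.shape)                                            # (2, 6, 7) for this toy vocabulary
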
def encode(iterator, method='xml', encoding=None, out=None):
'Encode serializer output into a string.\n \n :param iterator: the iterator returned from serializing a stream (basically\n any iterator that yields unicode objects)\n :param method: the serialization method; determines how characters not\n representable in the specified encoding are treated\n :param encoding: how the output string should be encoded; if set to `None`,\n this method returns a `unicode` object\n :param out: a file-like object that the output should be written to\n instead of being returned as one big string; note that if\n this is a file or socket (or similar), the `encoding` must\n not be `None` (that is, the output must be encoded)\n :return: a `str` or `unicode` object (depending on the `encoding`\n parameter), or `None` if the `out` parameter is provided\n \n :since: version 0.4.1\n :note: Changed in 0.5: added the `out` parameter\n '
if (encoding is not None):
errors = 'replace'
if ((method != 'text') and (not isinstance(method, TextSerializer))):
errors = 'xmlcharrefreplace'
_encode = (lambda string: string.encode(encoding, errors))
else:
_encode = (lambda string: string)
if (out is None):
return _encode(''.join(list(iterator)))
for chunk in iterator:
out.write(_encode(chunk)) | 4,164,344,655,488,987,000 | Encode serializer output into a string.
:param iterator: the iterator returned from serializing a stream (basically
any iterator that yields unicode objects)
:param method: the serialization method; determines how characters not
representable in the specified encoding are treated
:param encoding: how the output string should be encoded; if set to `None`,
this method returns a `unicode` object
:param out: a file-like object that the output should be written to
instead of being returned as one big string; note that if
this is a file or socket (or similar), the `encoding` must
not be `None` (that is, the output must be encoded)
:return: a `str` or `unicode` object (depending on the `encoding`
parameter), or `None` if the `out` parameter is provided
:since: version 0.4.1
:note: Changed in 0.5: added the `out` parameter | Packages/OmniMarkupPreviewer/OmniMarkupLib/Renderers/libs/python2/genshi/output.py | encode | 262877348/Data | python | def encode(iterator, method='xml', encoding=None, out=None):
'Encode serializer output into a string.\n \n :param iterator: the iterator returned from serializing a stream (basically\n any iterator that yields unicode objects)\n :param method: the serialization method; determines how characters not\n representable in the specified encoding are treated\n :param encoding: how the output string should be encoded; if set to `None`,\n this method returns a `unicode` object\n :param out: a file-like object that the output should be written to\n instead of being returned as one big string; note that if\n this is a file or socket (or similar), the `encoding` must\n not be `None` (that is, the output must be encoded)\n :return: a `str` or `unicode` object (depending on the `encoding`\n parameter), or `None` if the `out` parameter is provided\n \n :since: version 0.4.1\n :note: Changed in 0.5: added the `out` parameter\n '
if (encoding is not None):
errors = 'replace'
if ((method != 'text') and (not isinstance(method, TextSerializer))):
errors = 'xmlcharrefreplace'
_encode = (lambda string: string.encode(encoding, errors))
else:
_encode = (lambda string: string)
if (out is None):
return _encode(.join(list(iterator)))
for chunk in iterator:
out.write(_encode(chunk)) |
def get_serializer(method='xml', **kwargs):
'Return a serializer object for the given method.\n \n :param method: the serialization method; can be either "xml", "xhtml",\n "html", "text", or a custom serializer class\n\n Any additional keyword arguments are passed to the serializer, and thus\n depend on the `method` parameter value.\n \n :see: `XMLSerializer`, `XHTMLSerializer`, `HTMLSerializer`, `TextSerializer`\n :since: version 0.4.1\n '
if isinstance(method, basestring):
method = {'xml': XMLSerializer, 'xhtml': XHTMLSerializer, 'html': HTMLSerializer, 'text': TextSerializer}[method.lower()]
return method(**kwargs) | 1,971,087,575,448,008,400 | Return a serializer object for the given method.
:param method: the serialization method; can be either "xml", "xhtml",
"html", "text", or a custom serializer class
Any additional keyword arguments are passed to the serializer, and thus
depend on the `method` parameter value.
:see: `XMLSerializer`, `XHTMLSerializer`, `HTMLSerializer`, `TextSerializer`
:since: version 0.4.1 | Packages/OmniMarkupPreviewer/OmniMarkupLib/Renderers/libs/python2/genshi/output.py | get_serializer | 262877348/Data | python | def get_serializer(method='xml', **kwargs):
'Return a serializer object for the given method.\n \n :param method: the serialization method; can be either "xml", "xhtml",\n "html", "text", or a custom serializer class\n\n Any additional keyword arguments are passed to the serializer, and thus\n depend on the `method` parameter value.\n \n :see: `XMLSerializer`, `XHTMLSerializer`, `HTMLSerializer`, `TextSerializer`\n :since: version 0.4.1\n '
if isinstance(method, basestring):
method = {'xml': XMLSerializer, 'xhtml': XHTMLSerializer, 'html': HTMLSerializer, 'text': TextSerializer}[method.lower()]
return method(**kwargs) |
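
A hedged usage sketch for encode() and get_serializer() above; it assumes the genshi package is installed and that genshi.input.XML is available to parse a small markup snippet into a stream.

from genshi.input import XML
from genshi.output import get_serializer, encode

stream = XML('<p class="intro">Hello <em>world</em></p>')
xml_out = encode(get_serializer('xml')(stream), method='xml', encoding='utf-8')
text_out = encode(get_serializer('text')(stream), method='text', encoding='utf-8')
print(xml_out)                                            # the markup, re-serialized as XML
print(text_out)                                           # roughly b'Hello world'
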
def _prepare_cache(use_cache=True):
'Prepare a private token serialization cache.\n\n :param use_cache: boolean indicating whether a real cache should\n be used or not. If not, the returned functions\n are no-ops.\n\n :return: emit and get functions, for storing and retrieving\n serialized values from the cache.\n '
cache = {}
if use_cache:
def _emit(kind, input, output):
cache[(kind, input)] = output
return output
_get = cache.get
else:
def _emit(kind, input, output):
return output
def _get(key):
pass
return (_emit, _get, cache) | 5,364,588,915,978,782,000 | Prepare a private token serialization cache.
:param use_cache: boolean indicating whether a real cache should
be used or not. If not, the returned functions
are no-ops.
:return: emit and get functions, for storing and retrieving
serialized values from the cache. | Packages/OmniMarkupPreviewer/OmniMarkupLib/Renderers/libs/python2/genshi/output.py | _prepare_cache | 262877348/Data | python | def _prepare_cache(use_cache=True):
'Prepare a private token serialization cache.\n\n :param use_cache: boolean indicating whether a real cache should\n be used or not. If not, the returned functions\n are no-ops.\n\n :return: emit and get functions, for storing and retrieving\n serialized values from the cache.\n '
cache = {}
if use_cache:
def _emit(kind, input, output):
cache[(kind, input)] = output
return output
_get = cache.get
else:
def _emit(kind, input, output):
return output
def _get(key):
pass
return (_emit, _get, cache) |
@classmethod
def get(cls, name):
'Return the ``(name, pubid, sysid)`` tuple of the ``DOCTYPE``\n declaration for the specified name.\n \n The following names are recognized in this version:\n * "html" or "html-strict" for the HTML 4.01 strict DTD\n * "html-transitional" for the HTML 4.01 transitional DTD\n * "html-frameset" for the HTML 4.01 frameset DTD\n * "html5" for the ``DOCTYPE`` proposed for HTML5\n * "xhtml" or "xhtml-strict" for the XHTML 1.0 strict DTD\n * "xhtml-transitional" for the XHTML 1.0 transitional DTD\n * "xhtml-frameset" for the XHTML 1.0 frameset DTD\n * "xhtml11" for the XHTML 1.1 DTD\n * "svg" or "svg-full" for the SVG 1.1 DTD\n * "svg-basic" for the SVG Basic 1.1 DTD\n * "svg-tiny" for the SVG Tiny 1.1 DTD\n \n :param name: the name of the ``DOCTYPE``\n :return: the ``(name, pubid, sysid)`` tuple for the requested\n ``DOCTYPE``, or ``None`` if the name is not recognized\n :since: version 0.4.1\n '
return {'html': cls.HTML, 'html-strict': cls.HTML_STRICT, 'html-transitional': DocType.HTML_TRANSITIONAL, 'html-frameset': DocType.HTML_FRAMESET, 'html5': cls.HTML5, 'xhtml': cls.XHTML, 'xhtml-strict': cls.XHTML_STRICT, 'xhtml-transitional': cls.XHTML_TRANSITIONAL, 'xhtml-frameset': cls.XHTML_FRAMESET, 'xhtml11': cls.XHTML11, 'svg': cls.SVG, 'svg-full': cls.SVG_FULL, 'svg-basic': cls.SVG_BASIC, 'svg-tiny': cls.SVG_TINY}.get(name.lower()) | 2,591,025,277,008,289,000 | Return the ``(name, pubid, sysid)`` tuple of the ``DOCTYPE``
declaration for the specified name.
The following names are recognized in this version:
* "html" or "html-strict" for the HTML 4.01 strict DTD
* "html-transitional" for the HTML 4.01 transitional DTD
* "html-frameset" for the HTML 4.01 frameset DTD
* "html5" for the ``DOCTYPE`` proposed for HTML5
* "xhtml" or "xhtml-strict" for the XHTML 1.0 strict DTD
* "xhtml-transitional" for the XHTML 1.0 transitional DTD
* "xhtml-frameset" for the XHTML 1.0 frameset DTD
* "xhtml11" for the XHTML 1.1 DTD
* "svg" or "svg-full" for the SVG 1.1 DTD
* "svg-basic" for the SVG Basic 1.1 DTD
* "svg-tiny" for the SVG Tiny 1.1 DTD
:param name: the name of the ``DOCTYPE``
:return: the ``(name, pubid, sysid)`` tuple for the requested
``DOCTYPE``, or ``None`` if the name is not recognized
:since: version 0.4.1 | Packages/OmniMarkupPreviewer/OmniMarkupLib/Renderers/libs/python2/genshi/output.py | get | 262877348/Data | python | @classmethod
def get(cls, name):
'Return the ``(name, pubid, sysid)`` tuple of the ``DOCTYPE``\n declaration for the specified name.\n \n The following names are recognized in this version:\n * "html" or "html-strict" for the HTML 4.01 strict DTD\n * "html-transitional" for the HTML 4.01 transitional DTD\n * "html-frameset" for the HTML 4.01 frameset DTD\n * "html5" for the ``DOCTYPE`` proposed for HTML5\n * "xhtml" or "xhtml-strict" for the XHTML 1.0 strict DTD\n * "xhtml-transitional" for the XHTML 1.0 transitional DTD\n * "xhtml-frameset" for the XHTML 1.0 frameset DTD\n * "xhtml11" for the XHTML 1.1 DTD\n * "svg" or "svg-full" for the SVG 1.1 DTD\n * "svg-basic" for the SVG Basic 1.1 DTD\n * "svg-tiny" for the SVG Tiny 1.1 DTD\n \n :param name: the name of the ``DOCTYPE``\n :return: the ``(name, pubid, sysid)`` tuple for the requested\n ``DOCTYPE``, or ``None`` if the name is not recognized\n :since: version 0.4.1\n '
return {'html': cls.HTML, 'html-strict': cls.HTML_STRICT, 'html-transitional': DocType.HTML_TRANSITIONAL, 'html-frameset': DocType.HTML_FRAMESET, 'html5': cls.HTML5, 'xhtml': cls.XHTML, 'xhtml-strict': cls.XHTML_STRICT, 'xhtml-transitional': cls.XHTML_TRANSITIONAL, 'xhtml-frameset': cls.XHTML_FRAMESET, 'xhtml11': cls.XHTML11, 'svg': cls.SVG, 'svg-full': cls.SVG_FULL, 'svg-basic': cls.SVG_BASIC, 'svg-tiny': cls.SVG_TINY}.get(name.lower()) |
def __init__(self, doctype=None, strip_whitespace=True, namespace_prefixes=None, cache=True):
'Initialize the XML serializer.\n \n :param doctype: a ``(name, pubid, sysid)`` tuple that represents the\n DOCTYPE declaration that should be included at the top\n of the generated output, or the name of a DOCTYPE as\n defined in `DocType.get`\n :param strip_whitespace: whether extraneous whitespace should be\n stripped from the output\n :param cache: whether to cache the text output per event, which\n improves performance for repetitive markup\n :note: Changed in 0.4.2: The `doctype` parameter can now be a string.\n :note: Changed in 0.6: The `cache` parameter was added\n '
self.filters = [EmptyTagFilter()]
if strip_whitespace:
self.filters.append(WhitespaceFilter(self._PRESERVE_SPACE))
self.filters.append(NamespaceFlattener(prefixes=namespace_prefixes, cache=cache))
if doctype:
self.filters.append(DocTypeInserter(doctype))
self.cache = cache | -4,096,005,298,788,750,000 | Initialize the XML serializer.
:param doctype: a ``(name, pubid, sysid)`` tuple that represents the
DOCTYPE declaration that should be included at the top
of the generated output, or the name of a DOCTYPE as
defined in `DocType.get`
:param strip_whitespace: whether extraneous whitespace should be
stripped from the output
:param cache: whether to cache the text output per event, which
improves performance for repetitive markup
:note: Changed in 0.4.2: The `doctype` parameter can now be a string.
:note: Changed in 0.6: The `cache` parameter was added | Packages/OmniMarkupPreviewer/OmniMarkupLib/Renderers/libs/python2/genshi/output.py | __init__ | 262877348/Data | python | def __init__(self, doctype=None, strip_whitespace=True, namespace_prefixes=None, cache=True):
'Initialize the XML serializer.\n \n :param doctype: a ``(name, pubid, sysid)`` tuple that represents the\n DOCTYPE declaration that should be included at the top\n of the generated output, or the name of a DOCTYPE as\n defined in `DocType.get`\n :param strip_whitespace: whether extraneous whitespace should be\n stripped from the output\n :param cache: whether to cache the text output per event, which\n improves performance for repetitive markup\n :note: Changed in 0.4.2: The `doctype` parameter can now be a string.\n :note: Changed in 0.6: The `cache` parameter was added\n '
self.filters = [EmptyTagFilter()]
if strip_whitespace:
self.filters.append(WhitespaceFilter(self._PRESERVE_SPACE))
self.filters.append(NamespaceFlattener(prefixes=namespace_prefixes, cache=cache))
if doctype:
self.filters.append(DocTypeInserter(doctype))
self.cache = cache |
def __init__(self, doctype=None, strip_whitespace=True, cache=True):
'Initialize the HTML serializer.\n \n :param doctype: a ``(name, pubid, sysid)`` tuple that represents the\n DOCTYPE declaration that should be included at the top\n of the generated output\n :param strip_whitespace: whether extraneous whitespace should be\n stripped from the output\n :param cache: whether to cache the text output per event, which\n improves performance for repetitive markup\n :note: Changed in 0.6: The `cache` parameter was added\n '
super(HTMLSerializer, self).__init__(doctype, False)
self.filters = [EmptyTagFilter()]
if strip_whitespace:
self.filters.append(WhitespaceFilter(self._PRESERVE_SPACE, self._NOESCAPE_ELEMS))
self.filters.append(NamespaceFlattener(prefixes={'http://www.w3.org/1999/xhtml': ''}, cache=cache))
if doctype:
self.filters.append(DocTypeInserter(doctype))
self.cache = True | 8,366,487,007,270,063,000 | Initialize the HTML serializer.
:param doctype: a ``(name, pubid, sysid)`` tuple that represents the
DOCTYPE declaration that should be included at the top
of the generated output
:param strip_whitespace: whether extraneous whitespace should be
stripped from the output
:param cache: whether to cache the text output per event, which
improves performance for repetitive markup
:note: Changed in 0.6: The `cache` parameter was added | Packages/OmniMarkupPreviewer/OmniMarkupLib/Renderers/libs/python2/genshi/output.py | __init__ | 262877348/Data | python | def __init__(self, doctype=None, strip_whitespace=True, cache=True):
'Initialize the HTML serializer.\n \n :param doctype: a ``(name, pubid, sysid)`` tuple that represents the\n DOCTYPE declaration that should be included at the top\n of the generated output\n :param strip_whitespace: whether extraneous whitespace should be\n stripped from the output\n :param cache: whether to cache the text output per event, which\n improves performance for repetitive markup\n :note: Changed in 0.6: The `cache` parameter was added\n '
super(HTMLSerializer, self).__init__(doctype, False)
self.filters = [EmptyTagFilter()]
if strip_whitespace:
self.filters.append(WhitespaceFilter(self._PRESERVE_SPACE, self._NOESCAPE_ELEMS))
self.filters.append(NamespaceFlattener(prefixes={'http://www.w3.org/1999/xhtml': }, cache=cache))
if doctype:
self.filters.append(DocTypeInserter(doctype))
self.cache = True |
def __init__(self, strip_markup=False):
'Create the serializer.\n \n :param strip_markup: whether markup (tags and encoded characters) found\n in the text should be removed\n '
self.strip_markup = strip_markup | 4,920,285,809,569,111,000 | Create the serializer.
:param strip_markup: whether markup (tags and encoded characters) found
in the text should be removed | Packages/OmniMarkupPreviewer/OmniMarkupLib/Renderers/libs/python2/genshi/output.py | __init__ | 262877348/Data | python | def __init__(self, strip_markup=False):
'Create the serializer.\n \n :param strip_markup: whether markup (tags and encoded characters) found\n in the text should be removed\n '
self.strip_markup = strip_markup |
def __init__(self, preserve=None, noescape=None):
'Initialize the filter.\n \n :param preserve: a set or sequence of tag names for which white-space\n should be preserved\n :param noescape: a set or sequence of tag names for which text content\n should not be escaped\n \n The `noescape` set is expected to refer to elements that cannot contain\n further child elements (such as ``<style>`` or ``<script>`` in HTML\n documents).\n '
if (preserve is None):
preserve = []
self.preserve = frozenset(preserve)
if (noescape is None):
noescape = []
self.noescape = frozenset(noescape) | -8,983,873,959,590,232,000 | Initialize the filter.
:param preserve: a set or sequence of tag names for which white-space
should be preserved
:param noescape: a set or sequence of tag names for which text content
should not be escaped
The `noescape` set is expected to refer to elements that cannot contain
further child elements (such as ``<style>`` or ``<script>`` in HTML
documents). | Packages/OmniMarkupPreviewer/OmniMarkupLib/Renderers/libs/python2/genshi/output.py | __init__ | 262877348/Data | python | def __init__(self, preserve=None, noescape=None):
'Initialize the filter.\n \n :param preserve: a set or sequence of tag names for which white-space\n should be preserved\n :param noescape: a set or sequence of tag names for which text content\n should not be escaped\n \n The `noescape` set is expected to refer to elements that cannot contain\n further child elements (such as ``<style>`` or ``<script>`` in HTML\n documents).\n '
if (preserve is None):
preserve = []
self.preserve = frozenset(preserve)
if (noescape is None):
noescape = []
self.noescape = frozenset(noescape) |
def __init__(self, doctype):
'Initialize the filter.\n\n :param doctype: DOCTYPE as a string or DocType object.\n '
if isinstance(doctype, basestring):
doctype = DocType.get(doctype)
self.doctype_event = (DOCTYPE, doctype, (None, (- 1), (- 1))) | -3,173,367,274,843,465,000 | Initialize the filter.
:param doctype: DOCTYPE as a string or DocType object. | Packages/OmniMarkupPreviewer/OmniMarkupLib/Renderers/libs/python2/genshi/output.py | __init__ | 262877348/Data | python | def __init__(self, doctype):
'Initialize the filter.\n\n :param doctype: DOCTYPE as a string or DocType object.\n '
if isinstance(doctype, basestring):
doctype = DocType.get(doctype)
self.doctype_event = (DOCTYPE, doctype, (None, (- 1), (- 1))) |
def correlation_columns(dataset: pd.DataFrame, target_column: str, k: float=0.5):
'\n Columns that are correlated to the target point\n\n Parameters\n ----------\n\n dataset: pd.DataFrame\n The pandas dataframe\n \n target_column: str\n The target column to calculate correlation against\n\n k: float\n The correlation cuttoff point; defaults to -0.5 and 0.5.\n The values passed in represents the negative and positive cutofff\n\n Returns\n -------\n\n columns: list\n A list of columns that are correlated to the target column based on the cutoff point\n '
corr = np.abs(dataset.corr()[target_column])
corr_sorted = corr.sort_values(ascending=False)
columns = [col for (col, value) in zip(corr_sorted.index, corr_sorted.values) if ((value >= k) and (col != target_column))]
return columns | 1,533,437,794,607,541,500 | Columns that are correlated to the target point
Parameters
----------
dataset: pd.DataFrame
The pandas dataframe
target_column: str
The target column to calculate correlation against
k: float
The correlation cutoff point; defaults to -0.5 and 0.5.
The values passed in represent the negative and positive cutoff
Returns
-------
columns: list
A list of columns that are correlated to the target column based on the cutoff point | credit-card-fraud/src/features/build_features.py | correlation_columns | samie-hash/data-science-repo | python | def correlation_columns(dataset: pd.DataFrame, target_column: str, k: float=0.5):
'\n Columns that are correlated to the target point\n\n Parameters\n ----------\n\n dataset: pd.DataFrame\n The pandas dataframe\n \n target_column: str\n The target column to calculate correlation against\n\n k: float\n The correlation cuttoff point; defaults to -0.5 and 0.5.\n The values passed in represents the negative and positive cutofff\n\n Returns\n -------\n\n columns: list\n A list of columns that are correlated to the target column based on the cutoff point\n '
corr = np.abs(dataset.corr()[target_column])
corr_sorted = corr.sort_values(ascending=False)
columns = [col for (col, value) in zip(corr_sorted.index, corr_sorted.values) if ((value >= k) and (col != target_column))]
return columns |
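
A self-contained sketch of the correlation filter above on an invented frame; 'Class' stands in for the target column of the credit-card-fraud dataset and the 0.5 threshold matches the default k.

import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
df = pd.DataFrame({'V1': rng.normal(size=200), 'V2': rng.normal(size=200)})
df['Class'] = (df['V1'] + 0.1 * rng.normal(size=200) > 0).astype(int)   # strongly tied to V1

corr = np.abs(df.corr()['Class'])
corr_sorted = corr.sort_values(ascending=False)
selected = [col for col, value in zip(corr_sorted.index, corr_sorted.values)
            if value >= 0.5 and col != 'Class']
print(selected)                                           # typically ['V1']
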
def cummin(self: FrameLike, skipna: bool=True) -> FrameLike:
"\n Return cumulative minimum over a DataFrame or Series axis.\n\n Returns a DataFrame or Series of the same size containing the cumulative minimum.\n\n .. note:: the current implementation of cummin uses Spark's Window without\n specifying partition specification. This leads to move all data into\n single partition in single machine and could cause serious\n performance degradation. Avoid this method against very large dataset.\n\n Parameters\n ----------\n skipna : boolean, default True\n Exclude NA/null values. If an entire row/column is NA, the result will be NA.\n\n Returns\n -------\n DataFrame or Series\n\n See Also\n --------\n DataFrame.min : Return the minimum over DataFrame axis.\n DataFrame.cummax : Return cumulative maximum over DataFrame axis.\n DataFrame.cummin : Return cumulative minimum over DataFrame axis.\n DataFrame.cumsum : Return cumulative sum over DataFrame axis.\n Series.min : Return the minimum over Series axis.\n Series.cummax : Return cumulative maximum over Series axis.\n Series.cummin : Return cumulative minimum over Series axis.\n Series.cumsum : Return cumulative sum over Series axis.\n Series.cumprod : Return cumulative product over Series axis.\n\n Examples\n --------\n >>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))\n >>> df\n A B\n 0 2.0 1.0\n 1 3.0 NaN\n 2 1.0 0.0\n\n By default, iterates over rows and finds the minimum in each column.\n\n >>> df.cummin()\n A B\n 0 2.0 1.0\n 1 2.0 NaN\n 2 1.0 0.0\n\n It works identically in Series.\n\n >>> df.A.cummin()\n 0 2.0\n 1 2.0\n 2 1.0\n Name: A, dtype: float64\n "
return self._apply_series_op((lambda psser: psser._cum(F.min, skipna)), should_resolve=True) | -2,685,636,878,631,528,400 | Return cumulative minimum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative minimum.
.. note:: the current implementation of cummin uses Spark's Window without
specifying partition specification. This leads to move all data into
single partition in single machine and could cause serious
performance degradation. Avoid this method against very large dataset.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.min : Return the minimum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
Series.min : Return the minimum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the minimum in each column.
>>> df.cummin()
A B
0 2.0 1.0
1 2.0 NaN
2 1.0 0.0
It works identically in Series.
>>> df.A.cummin()
0 2.0
1 2.0
2 1.0
Name: A, dtype: float64 | python/pyspark/pandas/generic.py | cummin | XpressAI/spark | python | def cummin(self: FrameLike, skipna: bool=True) -> FrameLike:
"\n Return cumulative minimum over a DataFrame or Series axis.\n\n Returns a DataFrame or Series of the same size containing the cumulative minimum.\n\n .. note:: the current implementation of cummin uses Spark's Window without\n specifying partition specification. This leads to move all data into\n single partition in single machine and could cause serious\n performance degradation. Avoid this method against very large dataset.\n\n Parameters\n ----------\n skipna : boolean, default True\n Exclude NA/null values. If an entire row/column is NA, the result will be NA.\n\n Returns\n -------\n DataFrame or Series\n\n See Also\n --------\n DataFrame.min : Return the minimum over DataFrame axis.\n DataFrame.cummax : Return cumulative maximum over DataFrame axis.\n DataFrame.cummin : Return cumulative minimum over DataFrame axis.\n DataFrame.cumsum : Return cumulative sum over DataFrame axis.\n Series.min : Return the minimum over Series axis.\n Series.cummax : Return cumulative maximum over Series axis.\n Series.cummin : Return cumulative minimum over Series axis.\n Series.cumsum : Return cumulative sum over Series axis.\n Series.cumprod : Return cumulative product over Series axis.\n\n Examples\n --------\n >>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))\n >>> df\n A B\n 0 2.0 1.0\n 1 3.0 NaN\n 2 1.0 0.0\n\n By default, iterates over rows and finds the minimum in each column.\n\n >>> df.cummin()\n A B\n 0 2.0 1.0\n 1 2.0 NaN\n 2 1.0 0.0\n\n It works identically in Series.\n\n >>> df.A.cummin()\n 0 2.0\n 1 2.0\n 2 1.0\n Name: A, dtype: float64\n "
return self._apply_series_op((lambda psser: psser._cum(F.min, skipna)), should_resolve=True) |
def cummax(self: FrameLike, skipna: bool=True) -> FrameLike:
"\n Return cumulative maximum over a DataFrame or Series axis.\n\n Returns a DataFrame or Series of the same size containing the cumulative maximum.\n\n .. note:: the current implementation of cummax uses Spark's Window without\n specifying partition specification. This leads to move all data into\n single partition in single machine and could cause serious\n performance degradation. Avoid this method against very large dataset.\n\n Parameters\n ----------\n skipna : boolean, default True\n Exclude NA/null values. If an entire row/column is NA, the result will be NA.\n\n Returns\n -------\n DataFrame or Series\n\n See Also\n --------\n DataFrame.max : Return the maximum over DataFrame axis.\n DataFrame.cummax : Return cumulative maximum over DataFrame axis.\n DataFrame.cummin : Return cumulative minimum over DataFrame axis.\n DataFrame.cumsum : Return cumulative sum over DataFrame axis.\n DataFrame.cumprod : Return cumulative product over DataFrame axis.\n Series.max : Return the maximum over Series axis.\n Series.cummax : Return cumulative maximum over Series axis.\n Series.cummin : Return cumulative minimum over Series axis.\n Series.cumsum : Return cumulative sum over Series axis.\n Series.cumprod : Return cumulative product over Series axis.\n\n Examples\n --------\n >>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))\n >>> df\n A B\n 0 2.0 1.0\n 1 3.0 NaN\n 2 1.0 0.0\n\n By default, iterates over rows and finds the maximum in each column.\n\n >>> df.cummax()\n A B\n 0 2.0 1.0\n 1 3.0 NaN\n 2 3.0 1.0\n\n It works identically in Series.\n\n >>> df.B.cummax()\n 0 1.0\n 1 NaN\n 2 1.0\n Name: B, dtype: float64\n "
return self._apply_series_op((lambda psser: psser._cum(F.max, skipna)), should_resolve=True) | -3,348,748,663,714,688,000 | Return cumulative maximum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative maximum.
.. note:: the current implementation of cummax uses Spark's Window without
specifying a partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method on very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.max : Return the maximum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.max : Return the maximum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the maximum in each column.
>>> df.cummax()
A B
0 2.0 1.0
1 3.0 NaN
2 3.0 1.0
It works identically in Series.
>>> df.B.cummax()
0 1.0
1 NaN
2 1.0
Name: B, dtype: float64 | python/pyspark/pandas/generic.py | cummax | XpressAI/spark | python | def cummax(self: FrameLike, skipna: bool=True) -> FrameLike:
"\n Return cumulative maximum over a DataFrame or Series axis.\n\n Returns a DataFrame or Series of the same size containing the cumulative maximum.\n\n .. note:: the current implementation of cummax uses Spark's Window without\n specifying partition specification. This leads to move all data into\n single partition in single machine and could cause serious\n performance degradation. Avoid this method against very large dataset.\n\n Parameters\n ----------\n skipna : boolean, default True\n Exclude NA/null values. If an entire row/column is NA, the result will be NA.\n\n Returns\n -------\n DataFrame or Series\n\n See Also\n --------\n DataFrame.max : Return the maximum over DataFrame axis.\n DataFrame.cummax : Return cumulative maximum over DataFrame axis.\n DataFrame.cummin : Return cumulative minimum over DataFrame axis.\n DataFrame.cumsum : Return cumulative sum over DataFrame axis.\n DataFrame.cumprod : Return cumulative product over DataFrame axis.\n Series.max : Return the maximum over Series axis.\n Series.cummax : Return cumulative maximum over Series axis.\n Series.cummin : Return cumulative minimum over Series axis.\n Series.cumsum : Return cumulative sum over Series axis.\n Series.cumprod : Return cumulative product over Series axis.\n\n Examples\n --------\n >>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))\n >>> df\n A B\n 0 2.0 1.0\n 1 3.0 NaN\n 2 1.0 0.0\n\n By default, iterates over rows and finds the maximum in each column.\n\n >>> df.cummax()\n A B\n 0 2.0 1.0\n 1 3.0 NaN\n 2 3.0 1.0\n\n It works identically in Series.\n\n >>> df.B.cummax()\n 0 1.0\n 1 NaN\n 2 1.0\n Name: B, dtype: float64\n "
return self._apply_series_op((lambda psser: psser._cum(F.max, skipna)), should_resolve=True) |
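A minimal cummax sketch matching the examples in the row above; it assumes a working local Spark installation so that pyspark.pandas can start a session, and the data is illustrative:

import pyspark.pandas as ps

df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
# Column-wise running maximum; the result has the same shape as the input.
print(df.cummax())
# The Series form behaves the same way.
print(df.A.cummax())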
def cumsum(self: FrameLike, skipna: bool=True) -> FrameLike:
"\n Return cumulative sum over a DataFrame or Series axis.\n\n Returns a DataFrame or Series of the same size containing the cumulative sum.\n\n .. note:: the current implementation of cumsum uses Spark's Window without\n specifying partition specification. This leads to move all data into\n single partition in single machine and could cause serious\n performance degradation. Avoid this method against very large dataset.\n\n Parameters\n ----------\n skipna : boolean, default True\n Exclude NA/null values. If an entire row/column is NA, the result will be NA.\n\n Returns\n -------\n DataFrame or Series\n\n See Also\n --------\n DataFrame.sum : Return the sum over DataFrame axis.\n DataFrame.cummax : Return cumulative maximum over DataFrame axis.\n DataFrame.cummin : Return cumulative minimum over DataFrame axis.\n DataFrame.cumsum : Return cumulative sum over DataFrame axis.\n DataFrame.cumprod : Return cumulative product over DataFrame axis.\n Series.sum : Return the sum over Series axis.\n Series.cummax : Return cumulative maximum over Series axis.\n Series.cummin : Return cumulative minimum over Series axis.\n Series.cumsum : Return cumulative sum over Series axis.\n Series.cumprod : Return cumulative product over Series axis.\n\n Examples\n --------\n >>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))\n >>> df\n A B\n 0 2.0 1.0\n 1 3.0 NaN\n 2 1.0 0.0\n\n By default, iterates over rows and finds the sum in each column.\n\n >>> df.cumsum()\n A B\n 0 2.0 1.0\n 1 5.0 NaN\n 2 6.0 1.0\n\n It works identically in Series.\n\n >>> df.A.cumsum()\n 0 2.0\n 1 5.0\n 2 6.0\n Name: A, dtype: float64\n "
return self._apply_series_op((lambda psser: psser._cumsum(skipna)), should_resolve=True) | -8,141,575,604,497,648,000 | Return cumulative sum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative sum.
.. note:: the current implementation of cumsum uses Spark's Window without
specifying a partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method on very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.sum : Return the sum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.sum : Return the sum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the sum in each column.
>>> df.cumsum()
A B
0 2.0 1.0
1 5.0 NaN
2 6.0 1.0
It works identically in Series.
>>> df.A.cumsum()
0 2.0
1 5.0
2 6.0
Name: A, dtype: float64 | python/pyspark/pandas/generic.py | cumsum | XpressAI/spark | python | def cumsum(self: FrameLike, skipna: bool=True) -> FrameLike:
"\n Return cumulative sum over a DataFrame or Series axis.\n\n Returns a DataFrame or Series of the same size containing the cumulative sum.\n\n .. note:: the current implementation of cumsum uses Spark's Window without\n specifying partition specification. This leads to move all data into\n single partition in single machine and could cause serious\n performance degradation. Avoid this method against very large dataset.\n\n Parameters\n ----------\n skipna : boolean, default True\n Exclude NA/null values. If an entire row/column is NA, the result will be NA.\n\n Returns\n -------\n DataFrame or Series\n\n See Also\n --------\n DataFrame.sum : Return the sum over DataFrame axis.\n DataFrame.cummax : Return cumulative maximum over DataFrame axis.\n DataFrame.cummin : Return cumulative minimum over DataFrame axis.\n DataFrame.cumsum : Return cumulative sum over DataFrame axis.\n DataFrame.cumprod : Return cumulative product over DataFrame axis.\n Series.sum : Return the sum over Series axis.\n Series.cummax : Return cumulative maximum over Series axis.\n Series.cummin : Return cumulative minimum over Series axis.\n Series.cumsum : Return cumulative sum over Series axis.\n Series.cumprod : Return cumulative product over Series axis.\n\n Examples\n --------\n >>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))\n >>> df\n A B\n 0 2.0 1.0\n 1 3.0 NaN\n 2 1.0 0.0\n\n By default, iterates over rows and finds the sum in each column.\n\n >>> df.cumsum()\n A B\n 0 2.0 1.0\n 1 5.0 NaN\n 2 6.0 1.0\n\n It works identically in Series.\n\n >>> df.A.cumsum()\n 0 2.0\n 1 5.0\n 2 6.0\n Name: A, dtype: float64\n "
return self._apply_series_op((lambda psser: psser._cumsum(skipna)), should_resolve=True) |
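A short cumsum sketch along the same lines (local Spark session assumed; data illustrative):

import pyspark.pandas as ps

df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
# Running total per column: A becomes 2.0, 5.0, 6.0.
print(df.cumsum())
# skipna=False lets the NaN poison every later value in column B.
print(df.cumsum(skipna=False))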
def cumprod(self: FrameLike, skipna: bool=True) -> FrameLike:
"\n Return cumulative product over a DataFrame or Series axis.\n\n Returns a DataFrame or Series of the same size containing the cumulative product.\n\n .. note:: the current implementation of cumprod uses Spark's Window without\n specifying partition specification. This leads to move all data into\n single partition in single machine and could cause serious\n performance degradation. Avoid this method against very large dataset.\n\n .. note:: unlike pandas', pandas-on-Spark's emulates cumulative product by\n ``exp(sum(log(...)))`` trick. Therefore, it only works for positive numbers.\n\n Parameters\n ----------\n skipna : boolean, default True\n Exclude NA/null values. If an entire row/column is NA, the result will be NA.\n\n Returns\n -------\n DataFrame or Series\n\n See Also\n --------\n DataFrame.cummax : Return cumulative maximum over DataFrame axis.\n DataFrame.cummin : Return cumulative minimum over DataFrame axis.\n DataFrame.cumsum : Return cumulative sum over DataFrame axis.\n DataFrame.cumprod : Return cumulative product over DataFrame axis.\n Series.cummax : Return cumulative maximum over Series axis.\n Series.cummin : Return cumulative minimum over Series axis.\n Series.cumsum : Return cumulative sum over Series axis.\n Series.cumprod : Return cumulative product over Series axis.\n\n Raises\n ------\n Exception : If the values is equal to or lower than 0.\n\n Examples\n --------\n >>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [4.0, 10.0]], columns=list('AB'))\n >>> df\n A B\n 0 2.0 1.0\n 1 3.0 NaN\n 2 4.0 10.0\n\n By default, iterates over rows and finds the sum in each column.\n\n >>> df.cumprod()\n A B\n 0 2.0 1.0\n 1 6.0 NaN\n 2 24.0 10.0\n\n It works identically in Series.\n\n >>> df.A.cumprod()\n 0 2.0\n 1 6.0\n 2 24.0\n Name: A, dtype: float64\n "
return self._apply_series_op((lambda psser: psser._cumprod(skipna)), should_resolve=True) | 1,569,474,608,944,173,600 | Return cumulative product over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative product.
.. note:: the current implementation of cumprod uses Spark's Window without
specifying a partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method on very large datasets.
.. note:: unlike pandas', pandas-on-Spark emulates the cumulative product with an
``exp(sum(log(...)))`` trick. Therefore, it only works for positive numbers.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Raises
------
Exception : If any value is equal to or lower than 0.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [4.0, 10.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 4.0 10.0
By default, iterates over rows and finds the product in each column.
>>> df.cumprod()
A B
0 2.0 1.0
1 6.0 NaN
2 24.0 10.0
It works identically in Series.
>>> df.A.cumprod()
0 2.0
1 6.0
2 24.0
Name: A, dtype: float64 | python/pyspark/pandas/generic.py | cumprod | XpressAI/spark | python | def cumprod(self: FrameLike, skipna: bool=True) -> FrameLike:
"\n Return cumulative product over a DataFrame or Series axis.\n\n Returns a DataFrame or Series of the same size containing the cumulative product.\n\n .. note:: the current implementation of cumprod uses Spark's Window without\n specifying partition specification. This leads to move all data into\n single partition in single machine and could cause serious\n performance degradation. Avoid this method against very large dataset.\n\n .. note:: unlike pandas', pandas-on-Spark's emulates cumulative product by\n ``exp(sum(log(...)))`` trick. Therefore, it only works for positive numbers.\n\n Parameters\n ----------\n skipna : boolean, default True\n Exclude NA/null values. If an entire row/column is NA, the result will be NA.\n\n Returns\n -------\n DataFrame or Series\n\n See Also\n --------\n DataFrame.cummax : Return cumulative maximum over DataFrame axis.\n DataFrame.cummin : Return cumulative minimum over DataFrame axis.\n DataFrame.cumsum : Return cumulative sum over DataFrame axis.\n DataFrame.cumprod : Return cumulative product over DataFrame axis.\n Series.cummax : Return cumulative maximum over Series axis.\n Series.cummin : Return cumulative minimum over Series axis.\n Series.cumsum : Return cumulative sum over Series axis.\n Series.cumprod : Return cumulative product over Series axis.\n\n Raises\n ------\n Exception : If the values is equal to or lower than 0.\n\n Examples\n --------\n >>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [4.0, 10.0]], columns=list('AB'))\n >>> df\n A B\n 0 2.0 1.0\n 1 3.0 NaN\n 2 4.0 10.0\n\n By default, iterates over rows and finds the sum in each column.\n\n >>> df.cumprod()\n A B\n 0 2.0 1.0\n 1 6.0 NaN\n 2 24.0 10.0\n\n It works identically in Series.\n\n >>> df.A.cumprod()\n 0 2.0\n 1 6.0\n 2 24.0\n Name: A, dtype: float64\n "
return self._apply_series_op((lambda psser: psser._cumprod(skipna)), should_resolve=True) |
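Because the row above computes cumprod via exp(sum(log(...))), the sketch below sticks to strictly positive values, as its Raises section requires (local Spark session assumed; data illustrative):

import pyspark.pandas as ps

# Strictly positive values only: cumprod is emulated as exp(sum(log(x))).
df = ps.DataFrame([[2.0, 1.0], [3.0, None], [4.0, 10.0]], columns=list('AB'))
print(df.cumprod())    # A: 2.0, 6.0, 24.0; B keeps NaN in the middle row
print(df.A.cumprod())  # Series form gives the same running product for column A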
def get_dtype_counts(self) -> pd.Series:
"\n Return counts of unique dtypes in this object.\n\n .. deprecated:: 0.14.0\n\n Returns\n -------\n dtype : pd.Series\n Series with the count of columns with each dtype.\n\n See Also\n --------\n dtypes : Return the dtypes in this object.\n\n Examples\n --------\n >>> a = [['a', 1, 1], ['b', 2, 2], ['c', 3, 3]]\n >>> df = ps.DataFrame(a, columns=['str', 'int1', 'int2'])\n >>> df\n str int1 int2\n 0 a 1 1\n 1 b 2 2\n 2 c 3 3\n\n >>> df.get_dtype_counts().sort_values()\n object 1\n int64 2\n dtype: int64\n\n >>> df.str.get_dtype_counts().sort_values()\n object 1\n dtype: int64\n "
warnings.warn('`get_dtype_counts` has been deprecated and will be removed in a future version. For DataFrames use `.dtypes.value_counts()`', FutureWarning)
if (not isinstance(self.dtypes, Iterable)):
dtypes = [self.dtypes]
else:
dtypes = list(self.dtypes)
return pd.Series(dict(Counter([d.name for d in dtypes]))) | 9,206,764,287,967,669,000 | Return counts of unique dtypes in this object.
.. deprecated:: 0.14.0
Returns
-------
dtype : pd.Series
Series with the count of columns with each dtype.
See Also
--------
dtypes : Return the dtypes in this object.
Examples
--------
>>> a = [['a', 1, 1], ['b', 2, 2], ['c', 3, 3]]
>>> df = ps.DataFrame(a, columns=['str', 'int1', 'int2'])
>>> df
str int1 int2
0 a 1 1
1 b 2 2
2 c 3 3
>>> df.get_dtype_counts().sort_values()
object 1
int64 2
dtype: int64
>>> df.str.get_dtype_counts().sort_values()
object 1
dtype: int64 | python/pyspark/pandas/generic.py | get_dtype_counts | XpressAI/spark | python | def get_dtype_counts(self) -> pd.Series:
"\n Return counts of unique dtypes in this object.\n\n .. deprecated:: 0.14.0\n\n Returns\n -------\n dtype : pd.Series\n Series with the count of columns with each dtype.\n\n See Also\n --------\n dtypes : Return the dtypes in this object.\n\n Examples\n --------\n >>> a = [['a', 1, 1], ['b', 2, 2], ['c', 3, 3]]\n >>> df = ps.DataFrame(a, columns=['str', 'int1', 'int2'])\n >>> df\n str int1 int2\n 0 a 1 1\n 1 b 2 2\n 2 c 3 3\n\n >>> df.get_dtype_counts().sort_values()\n object 1\n int64 2\n dtype: int64\n\n >>> df.str.get_dtype_counts().sort_values()\n object 1\n dtype: int64\n "
warnings.warn('`get_dtype_counts` has been deprecated and will be removed in a future version. For DataFrames use `.dtypes.value_counts()`', FutureWarning)
if (not isinstance(self.dtypes, Iterable)):
dtypes = [self.dtypes]
else:
dtypes = list(self.dtypes)
return pd.Series(dict(Counter([d.name for d in dtypes]))) |
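Since get_dtype_counts is deprecated, a sketch that shows the call next to the replacement suggested by its own warning (local Spark session assumed; column names are illustrative):

import pyspark.pandas as ps

df = ps.DataFrame({'str': ['a', 'b', 'c'], 'int1': [1, 2, 3], 'int2': [1, 2, 3]})
# Deprecated helper: emits a FutureWarning and counts columns per dtype.
print(df.get_dtype_counts())
# Replacement: df.dtypes is a pandas Series, so value_counts() runs locally on the driver.
print(df.dtypes.value_counts())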
def pipe(self, func: Callable[(..., Any)], *args: Any, **kwargs: Any) -> Any:
'\n Apply func(self, \\*args, \\*\\*kwargs).\n\n Parameters\n ----------\n func : function\n function to apply to the DataFrame.\n ``args``, and ``kwargs`` are passed into ``func``.\n Alternatively a ``(callable, data_keyword)`` tuple where\n ``data_keyword`` is a string indicating the keyword of\n ``callable`` that expects the DataFrames.\n args : iterable, optional\n positional arguments passed into ``func``.\n kwargs : mapping, optional\n a dictionary of keyword arguments passed into ``func``.\n\n Returns\n -------\n object : the return type of ``func``.\n\n Notes\n -----\n Use ``.pipe`` when chaining together functions that expect\n Series, DataFrames or GroupBy objects. For example, given\n\n >>> df = ps.DataFrame({\'category\': [\'A\', \'A\', \'B\'],\n ... \'col1\': [1, 2, 3],\n ... \'col2\': [4, 5, 6]},\n ... columns=[\'category\', \'col1\', \'col2\'])\n >>> def keep_category_a(df):\n ... return df[df[\'category\'] == \'A\']\n >>> def add_one(df, column):\n ... return df.assign(col3=df[column] + 1)\n >>> def multiply(df, column1, column2):\n ... return df.assign(col4=df[column1] * df[column2])\n\n\n instead of writing\n\n >>> multiply(add_one(keep_category_a(df), column="col1"), column1="col2", column2="col3")\n category col1 col2 col3 col4\n 0 A 1 4 2 8\n 1 A 2 5 3 15\n\n\n You can write\n\n >>> (df.pipe(keep_category_a)\n ... .pipe(add_one, column="col1")\n ... .pipe(multiply, column1="col2", column2="col3")\n ... )\n category col1 col2 col3 col4\n 0 A 1 4 2 8\n 1 A 2 5 3 15\n\n\n If you have a function that takes the data as (say) the second\n argument, pass a tuple indicating which keyword expects the\n data. For example, suppose ``f`` takes its data as ``df``:\n\n >>> def multiply_2(column1, df, column2):\n ... return df.assign(col4=df[column1] * df[column2])\n\n\n Then you can write\n\n >>> (df.pipe(keep_category_a)\n ... .pipe(add_one, column="col1")\n ... .pipe((multiply_2, \'df\'), column1="col2", column2="col3")\n ... )\n category col1 col2 col3 col4\n 0 A 1 4 2 8\n 1 A 2 5 3 15\n\n You can use lambda as wel\n\n >>> ps.Series([1, 2, 3]).pipe(lambda x: (x + 1).rename("value"))\n 0 2\n 1 3\n 2 4\n Name: value, dtype: int64\n '
if isinstance(func, tuple):
(func, target) = func
if (target in kwargs):
raise ValueError(('%s is both the pipe target and a keyword argument' % target))
kwargs[target] = self
return func(*args, **kwargs)
else:
return func(self, *args, **kwargs) | -5,945,533,546,538,242,000 | Apply func(self, \*args, \*\*kwargs).
Parameters
----------
func : function
function to apply to the DataFrame.
``args`` and ``kwargs`` are passed into ``func``.
Alternatively a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the DataFrame.
args : iterable, optional
positional arguments passed into ``func``.
kwargs : mapping, optional
a dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
Notes
-----
Use ``.pipe`` when chaining together functions that expect
Series, DataFrames or GroupBy objects. For example, given
>>> df = ps.DataFrame({'category': ['A', 'A', 'B'],
... 'col1': [1, 2, 3],
... 'col2': [4, 5, 6]},
... columns=['category', 'col1', 'col2'])
>>> def keep_category_a(df):
... return df[df['category'] == 'A']
>>> def add_one(df, column):
... return df.assign(col3=df[column] + 1)
>>> def multiply(df, column1, column2):
... return df.assign(col4=df[column1] * df[column2])
instead of writing
>>> multiply(add_one(keep_category_a(df), column="col1"), column1="col2", column2="col3")
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
You can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe(multiply, column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
If you have a function that takes the data as (say) the second
argument, pass a tuple indicating which keyword expects the
data. For example, suppose ``f`` takes its data as ``df``:
>>> def multiply_2(column1, df, column2):
... return df.assign(col4=df[column1] * df[column2])
Then you can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe((multiply_2, 'df'), column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
You can use lambda as well
>>> ps.Series([1, 2, 3]).pipe(lambda x: (x + 1).rename("value"))
0 2
1 3
2 4
Name: value, dtype: int64 | python/pyspark/pandas/generic.py | pipe | XpressAI/spark | python | def pipe(self, func: Callable[(..., Any)], *args: Any, **kwargs: Any) -> Any:
'\n Apply func(self, \\*args, \\*\\*kwargs).\n\n Parameters\n ----------\n func : function\n function to apply to the DataFrame.\n ``args``, and ``kwargs`` are passed into ``func``.\n Alternatively a ``(callable, data_keyword)`` tuple where\n ``data_keyword`` is a string indicating the keyword of\n ``callable`` that expects the DataFrames.\n args : iterable, optional\n positional arguments passed into ``func``.\n kwargs : mapping, optional\n a dictionary of keyword arguments passed into ``func``.\n\n Returns\n -------\n object : the return type of ``func``.\n\n Notes\n -----\n Use ``.pipe`` when chaining together functions that expect\n Series, DataFrames or GroupBy objects. For example, given\n\n >>> df = ps.DataFrame({\'category\': [\'A\', \'A\', \'B\'],\n ... \'col1\': [1, 2, 3],\n ... \'col2\': [4, 5, 6]},\n ... columns=[\'category\', \'col1\', \'col2\'])\n >>> def keep_category_a(df):\n ... return df[df[\'category\'] == \'A\']\n >>> def add_one(df, column):\n ... return df.assign(col3=df[column] + 1)\n >>> def multiply(df, column1, column2):\n ... return df.assign(col4=df[column1] * df[column2])\n\n\n instead of writing\n\n >>> multiply(add_one(keep_category_a(df), column="col1"), column1="col2", column2="col3")\n category col1 col2 col3 col4\n 0 A 1 4 2 8\n 1 A 2 5 3 15\n\n\n You can write\n\n >>> (df.pipe(keep_category_a)\n ... .pipe(add_one, column="col1")\n ... .pipe(multiply, column1="col2", column2="col3")\n ... )\n category col1 col2 col3 col4\n 0 A 1 4 2 8\n 1 A 2 5 3 15\n\n\n If you have a function that takes the data as (say) the second\n argument, pass a tuple indicating which keyword expects the\n data. For example, suppose ``f`` takes its data as ``df``:\n\n >>> def multiply_2(column1, df, column2):\n ... return df.assign(col4=df[column1] * df[column2])\n\n\n Then you can write\n\n >>> (df.pipe(keep_category_a)\n ... .pipe(add_one, column="col1")\n ... .pipe((multiply_2, \'df\'), column1="col2", column2="col3")\n ... )\n category col1 col2 col3 col4\n 0 A 1 4 2 8\n 1 A 2 5 3 15\n\n You can use lambda as wel\n\n >>> ps.Series([1, 2, 3]).pipe(lambda x: (x + 1).rename("value"))\n 0 2\n 1 3\n 2 4\n Name: value, dtype: int64\n '
if isinstance(func, tuple):
(func, target) = func
if (target in kwargs):
raise ValueError(('%s is both the pipe target and a keyword argument' % target))
kwargs[target] = self
return func(*args, **kwargs)
else:
return func(self, *args, **kwargs) |
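A compact pipe sketch mirroring the chaining idea documented above (local Spark session assumed; the helper functions and column names are illustrative):

import pyspark.pandas as ps

def keep_category_a(df):
    # Filter rows whose 'category' column equals 'A'.
    return df[df['category'] == 'A']

def add_one(df, column):
    # Add a derived column based on the given column name.
    return df.assign(col3=df[column] + 1)

df = ps.DataFrame({'category': ['A', 'A', 'B'], 'col1': [1, 2, 3], 'col2': [4, 5, 6]})
# Each .pipe call receives the frame produced by the previous step.
result = df.pipe(keep_category_a).pipe(add_one, column='col1')
print(result)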
def to_numpy(self) -> np.ndarray:
'\n A NumPy ndarray representing the values in this DataFrame or Series.\n\n .. note:: This method should only be used if the resulting NumPy ndarray is expected\n to be small, as all the data is loaded into the driver\'s memory.\n\n Returns\n -------\n numpy.ndarray\n\n Examples\n --------\n >>> ps.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy()\n array([[1, 3],\n [2, 4]])\n\n With heterogeneous data, the lowest common type will have to be used.\n\n >>> ps.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}).to_numpy()\n array([[1. , 3. ],\n [2. , 4.5]])\n\n For a mix of numeric and non-numeric types, the output array will have object dtype.\n\n >>> df = ps.DataFrame({"A": [1, 2], "B": [3.0, 4.5], "C": pd.date_range(\'2000\', periods=2)})\n >>> df.to_numpy()\n array([[1, 3.0, Timestamp(\'2000-01-01 00:00:00\')],\n [2, 4.5, Timestamp(\'2000-01-02 00:00:00\')]], dtype=object)\n\n For Series,\n\n >>> ps.Series([\'a\', \'b\', \'a\']).to_numpy()\n array([\'a\', \'b\', \'a\'], dtype=object)\n '
return self.to_pandas().values | 3,172,926,021,327,149,600 | A NumPy ndarray representing the values in this DataFrame or Series.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
Examples
--------
>>> ps.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy()
array([[1, 3],
[2, 4]])
With heterogeneous data, the lowest common type will have to be used.
>>> ps.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}).to_numpy()
array([[1. , 3. ],
[2. , 4.5]])
For a mix of numeric and non-numeric types, the output array will have object dtype.
>>> df = ps.DataFrame({"A": [1, 2], "B": [3.0, 4.5], "C": pd.date_range('2000', periods=2)})
>>> df.to_numpy()
array([[1, 3.0, Timestamp('2000-01-01 00:00:00')],
[2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object)
For Series,
>>> ps.Series(['a', 'b', 'a']).to_numpy()
array(['a', 'b', 'a'], dtype=object) | python/pyspark/pandas/generic.py | to_numpy | XpressAI/spark | python | def to_numpy(self) -> np.ndarray:
'\n A NumPy ndarray representing the values in this DataFrame or Series.\n\n .. note:: This method should only be used if the resulting NumPy ndarray is expected\n to be small, as all the data is loaded into the driver\'s memory.\n\n Returns\n -------\n numpy.ndarray\n\n Examples\n --------\n >>> ps.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy()\n array([[1, 3],\n [2, 4]])\n\n With heterogeneous data, the lowest common type will have to be used.\n\n >>> ps.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}).to_numpy()\n array([[1. , 3. ],\n [2. , 4.5]])\n\n For a mix of numeric and non-numeric types, the output array will have object dtype.\n\n >>> df = ps.DataFrame({"A": [1, 2], "B": [3.0, 4.5], "C": pd.date_range(\'2000\', periods=2)})\n >>> df.to_numpy()\n array([[1, 3.0, Timestamp(\'2000-01-01 00:00:00\')],\n [2, 4.5, Timestamp(\'2000-01-02 00:00:00\')]], dtype=object)\n\n For Series,\n\n >>> ps.Series([\'a\', \'b\', \'a\']).to_numpy()\n array([\'a\', \'b\', \'a\'], dtype=object)\n '
return self.to_pandas().values |
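A to_numpy sketch; per the note in the row above, the whole frame is collected to the driver, so keep it to small frames (local Spark session assumed; data illustrative):

import pyspark.pandas as ps

df = ps.DataFrame({'A': [1, 2], 'B': [3.0, 4.5]})
# Collects the frame to the driver and returns a local numpy array.
arr = df.to_numpy()
print(arr.shape, arr.dtype)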
@property
def values(self) -> np.ndarray:
"\n Return a Numpy representation of the DataFrame or the Series.\n\n .. warning:: We recommend using `DataFrame.to_numpy()` or `Series.to_numpy()` instead.\n\n .. note:: This method should only be used if the resulting NumPy ndarray is expected\n to be small, as all the data is loaded into the driver's memory.\n\n Returns\n -------\n numpy.ndarray\n\n Examples\n --------\n A DataFrame where all columns are the same type (e.g., int64) results in an array of\n the same type.\n\n >>> df = ps.DataFrame({'age': [ 3, 29],\n ... 'height': [94, 170],\n ... 'weight': [31, 115]})\n >>> df\n age height weight\n 0 3 94 31\n 1 29 170 115\n >>> df.dtypes\n age int64\n height int64\n weight int64\n dtype: object\n >>> df.values\n array([[ 3, 94, 31],\n [ 29, 170, 115]])\n\n A DataFrame with mixed type columns(e.g., str/object, int64, float32) results in an ndarray\n of the broadest type that accommodates these mixed types (e.g., object).\n\n >>> df2 = ps.DataFrame([('parrot', 24.0, 'second'),\n ... ('lion', 80.5, 'first'),\n ... ('monkey', np.nan, None)],\n ... columns=('name', 'max_speed', 'rank'))\n >>> df2.dtypes\n name object\n max_speed float64\n rank object\n dtype: object\n >>> df2.values\n array([['parrot', 24.0, 'second'],\n ['lion', 80.5, 'first'],\n ['monkey', nan, None]], dtype=object)\n\n For Series,\n\n >>> ps.Series([1, 2, 3]).values\n array([1, 2, 3])\n\n >>> ps.Series(list('aabc')).values\n array(['a', 'a', 'b', 'c'], dtype=object)\n "
warnings.warn('We recommend using `{}.to_numpy()` instead.'.format(type(self).__name__))
return self.to_numpy() | -1,081,172,129,595,538,400 | Return a Numpy representation of the DataFrame or the Series.
.. warning:: We recommend using `DataFrame.to_numpy()` or `Series.to_numpy()` instead.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
Examples
--------
A DataFrame where all columns are the same type (e.g., int64) results in an array of
the same type.
>>> df = ps.DataFrame({'age': [ 3, 29],
... 'height': [94, 170],
... 'weight': [31, 115]})
>>> df
age height weight
0 3 94 31
1 29 170 115
>>> df.dtypes
age int64
height int64
weight int64
dtype: object
>>> df.values
array([[ 3, 94, 31],
[ 29, 170, 115]])
A DataFrame with mixed type columns (e.g., str/object, int64, float32) results in an ndarray
of the broadest type that accommodates these mixed types (e.g., object).
>>> df2 = ps.DataFrame([('parrot', 24.0, 'second'),
... ('lion', 80.5, 'first'),
... ('monkey', np.nan, None)],
... columns=('name', 'max_speed', 'rank'))
>>> df2.dtypes
name object
max_speed float64
rank object
dtype: object
>>> df2.values
array([['parrot', 24.0, 'second'],
['lion', 80.5, 'first'],
['monkey', nan, None]], dtype=object)
For Series,
>>> ps.Series([1, 2, 3]).values
array([1, 2, 3])
>>> ps.Series(list('aabc')).values
array(['a', 'a', 'b', 'c'], dtype=object) | python/pyspark/pandas/generic.py | values | XpressAI/spark | python | @property
def values(self) -> np.ndarray:
"\n Return a Numpy representation of the DataFrame or the Series.\n\n .. warning:: We recommend using `DataFrame.to_numpy()` or `Series.to_numpy()` instead.\n\n .. note:: This method should only be used if the resulting NumPy ndarray is expected\n to be small, as all the data is loaded into the driver's memory.\n\n Returns\n -------\n numpy.ndarray\n\n Examples\n --------\n A DataFrame where all columns are the same type (e.g., int64) results in an array of\n the same type.\n\n >>> df = ps.DataFrame({'age': [ 3, 29],\n ... 'height': [94, 170],\n ... 'weight': [31, 115]})\n >>> df\n age height weight\n 0 3 94 31\n 1 29 170 115\n >>> df.dtypes\n age int64\n height int64\n weight int64\n dtype: object\n >>> df.values\n array([[ 3, 94, 31],\n [ 29, 170, 115]])\n\n A DataFrame with mixed type columns(e.g., str/object, int64, float32) results in an ndarray\n of the broadest type that accommodates these mixed types (e.g., object).\n\n >>> df2 = ps.DataFrame([('parrot', 24.0, 'second'),\n ... ('lion', 80.5, 'first'),\n ... ('monkey', np.nan, None)],\n ... columns=('name', 'max_speed', 'rank'))\n >>> df2.dtypes\n name object\n max_speed float64\n rank object\n dtype: object\n >>> df2.values\n array([['parrot', 24.0, 'second'],\n ['lion', 80.5, 'first'],\n ['monkey', nan, None]], dtype=object)\n\n For Series,\n\n >>> ps.Series([1, 2, 3]).values\n array([1, 2, 3])\n\n >>> ps.Series(list('aabc')).values\n array(['a', 'a', 'b', 'c'], dtype=object)\n "
warnings.warn('We recommend using `{}.to_numpy()` instead.'.format(type(self).__name__))
return self.to_numpy() |
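The values property in the row above simply warns and delegates to to_numpy(), so a sketch only needs to show that both paths give the same local array (local Spark session assumed; data illustrative):

import numpy as np
import pyspark.pandas as ps

s = ps.Series([1, 2, 3])
# .values emits a recommendation to use .to_numpy(); the arrays are identical.
assert np.array_equal(s.values, s.to_numpy())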
def to_csv(self, path: Optional[str]=None, sep: str=',', na_rep: str='', columns: Optional[List[Union[(Any, Tuple)]]]=None, header: bool=True, quotechar: str='"', date_format: Optional[str]=None, escapechar: Optional[str]=None, num_files: Optional[int]=None, mode: str='overwrite', partition_cols: Optional[Union[(str, List[str])]]=None, index_col: Optional[Union[(str, List[str])]]=None, **options: Any) -> Optional[str]:
'\n Write object to a comma-separated values (csv) file.\n\n .. note:: pandas-on-Spark `to_csv` writes files to a path or URI. Unlike pandas\',\n pandas-on-Spark respects HDFS\'s property such as \'fs.default.name\'.\n\n .. note:: pandas-on-Spark writes CSV files into the directory, `path`, and writes\n multiple `part-...` files in the directory when `path` is specified.\n This behaviour was inherited from Apache Spark. The number of files can\n be controlled by `num_files`.\n\n Parameters\n ----------\n path : str, default None\n File path. If None is provided the result is returned as a string.\n sep : str, default \',\'\n String of length 1. Field delimiter for the output file.\n na_rep : str, default \'\'\n Missing data representation.\n columns : sequence, optional\n Columns to write.\n header : bool or list of str, default True\n Write out the column names. If a list of strings is given it is\n assumed to be aliases for the column names.\n quotechar : str, default \'\\"\'\n String of length 1. Character used to quote fields.\n date_format : str, default None\n Format string for datetime objects.\n escapechar : str, default None\n String of length 1. Character used to escape `sep` and `quotechar`\n when appropriate.\n num_files : the number of files to be written in `path` directory when\n this is a path.\n mode : str {\'append\', \'overwrite\', \'ignore\', \'error\', \'errorifexists\'},\n default \'overwrite\'. Specifies the behavior of the save operation when the\n destination exists already.\n\n - \'append\': Append the new data to existing data.\n - \'overwrite\': Overwrite existing data.\n - \'ignore\': Silently ignore this operation if data already exists.\n - \'error\' or \'errorifexists\': Throw an exception if data already exists.\n\n partition_cols : str or list of str, optional, default None\n Names of partitioning columns\n index_col: str or list of str, optional, default: None\n Column names to be used in Spark to represent pandas-on-Spark\'s index. The index name\n in pandas-on-Spark is ignored. By default, the index is always lost.\n options: keyword arguments for additional options specific to PySpark.\n This kwargs are specific to PySpark\'s CSV options to pass. Check\n the options in PySpark\'s API documentation for spark.write.csv(...).\n It has higher priority and overwrites all other options.\n This parameter only works when `path` is specified.\n\n Returns\n -------\n str or None\n\n See Also\n --------\n read_csv\n DataFrame.to_delta\n DataFrame.to_table\n DataFrame.to_parquet\n DataFrame.to_spark_io\n\n Examples\n --------\n >>> df = ps.DataFrame(dict(\n ... date=list(pd.date_range(\'2012-1-1 12:00:00\', periods=3, freq=\'M\')),\n ... country=[\'KR\', \'US\', \'JP\'],\n ... code=[1, 2 ,3]), columns=[\'date\', \'country\', \'code\'])\n >>> df.sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE\n date country code\n ... 2012-01-31 12:00:00 KR 1\n ... 2012-02-29 12:00:00 US 2\n ... 2012-03-31 12:00:00 JP 3\n\n >>> print(df.to_csv()) # doctest: +NORMALIZE_WHITESPACE\n date,country,code\n 2012-01-31 12:00:00,KR,1\n 2012-02-29 12:00:00,US,2\n 2012-03-31 12:00:00,JP,3\n\n >>> df.cummax().to_csv(path=r\'%s/to_csv/foo.csv\' % path, num_files=1)\n >>> ps.read_csv(\n ... path=r\'%s/to_csv/foo.csv\' % path\n ... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE\n date country code\n ... 2012-01-31 12:00:00 KR 1\n ... 2012-02-29 12:00:00 US 2\n ... 
2012-03-31 12:00:00 US 3\n\n In case of Series,\n\n >>> print(df.date.to_csv()) # doctest: +NORMALIZE_WHITESPACE\n date\n 2012-01-31 12:00:00\n 2012-02-29 12:00:00\n 2012-03-31 12:00:00\n\n >>> df.date.to_csv(path=r\'%s/to_csv/foo.csv\' % path, num_files=1)\n >>> ps.read_csv(\n ... path=r\'%s/to_csv/foo.csv\' % path\n ... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE\n date\n ... 2012-01-31 12:00:00\n ... 2012-02-29 12:00:00\n ... 2012-03-31 12:00:00\n\n You can preserve the index in the roundtrip as below.\n\n >>> df.set_index("country", append=True, inplace=True)\n >>> df.date.to_csv(\n ... path=r\'%s/to_csv/bar.csv\' % path,\n ... num_files=1,\n ... index_col=["index1", "index2"])\n >>> ps.read_csv(\n ... path=r\'%s/to_csv/bar.csv\' % path, index_col=["index1", "index2"]\n ... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE\n date\n index1 index2\n ... ... 2012-01-31 12:00:00\n ... ... 2012-02-29 12:00:00\n ... ... 2012-03-31 12:00:00\n '
if (('options' in options) and isinstance(options.get('options'), dict) and (len(options) == 1)):
options = options.get('options')
if (path is None):
psdf_or_ser = self
if ((LooseVersion('0.24') > LooseVersion(pd.__version__)) and isinstance(self, ps.Series)):
return psdf_or_ser.to_pandas().to_csv(None, sep=sep, na_rep=na_rep, header=header, date_format=date_format, index=False)
else:
return psdf_or_ser.to_pandas().to_csv(None, sep=sep, na_rep=na_rep, columns=columns, header=header, quotechar=quotechar, date_format=date_format, escapechar=escapechar, index=False)
psdf = self
if isinstance(self, ps.Series):
psdf = self.to_frame()
if (columns is None):
column_labels = psdf._internal.column_labels
else:
column_labels = []
for label in columns:
if (not is_name_like_tuple(label)):
label = (label,)
if (label not in psdf._internal.column_labels):
raise KeyError(name_like_string(label))
column_labels.append(label)
if isinstance(index_col, str):
index_cols = [index_col]
elif (index_col is None):
index_cols = []
else:
index_cols = index_col
if ((header is True) and (psdf._internal.column_labels_level > 1)):
raise ValueError('to_csv only support one-level index column now')
elif isinstance(header, list):
sdf = psdf.to_spark(index_col)
sdf = sdf.select(([scol_for(sdf, name_like_string(label)) for label in index_cols] + [scol_for(sdf, (str(i) if (label is None) else name_like_string(label))).alias(new_name) for (i, (label, new_name)) in enumerate(zip(column_labels, header))]))
header = True
else:
sdf = psdf.to_spark(index_col)
sdf = sdf.select(([scol_for(sdf, name_like_string(label)) for label in index_cols] + [scol_for(sdf, (str(i) if (label is None) else name_like_string(label))) for (i, label) in enumerate(column_labels)]))
if (num_files is not None):
sdf = sdf.repartition(num_files)
builder = sdf.write.mode(mode)
if (partition_cols is not None):
builder.partitionBy(partition_cols)
builder._set_opts(sep=sep, nullValue=na_rep, header=header, quote=quotechar, dateFormat=date_format, charToEscapeQuoteEscaping=escapechar)
builder.options(**options).format('csv').save(path)
return None | 4,511,092,456,395,762,000 | Write object to a comma-separated values (csv) file.
.. note:: pandas-on-Spark `to_csv` writes files to a path or URI. Unlike pandas',
pandas-on-Spark respects HDFS's property such as 'fs.default.name'.
.. note:: pandas-on-Spark writes CSV files into the directory, `path`, and writes
multiple `part-...` files in the directory when `path` is specified.
This behaviour was inherited from Apache Spark. The number of files can
be controlled by `num_files`.
Parameters
----------
path : str, default None
File path. If None is provided the result is returned as a string.
sep : str, default ','
String of length 1. Field delimiter for the output file.
na_rep : str, default ''
Missing data representation.
columns : sequence, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of strings is given it is
assumed to be aliases for the column names.
quotechar : str, default '\"'
String of length 1. Character used to quote fields.
date_format : str, default None
Format string for datetime objects.
escapechar : str, default None
String of length 1. Character used to escape `sep` and `quotechar`
when appropriate.
num_files : the number of files to be written in `path` directory when
this is a path.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options: keyword arguments for additional options specific to PySpark.
This kwargs are specific to PySpark's CSV options to pass. Check
the options in PySpark's API documentation for spark.write.csv(...).
It has higher priority and overwrites all other options.
This parameter only works when `path` is specified.
Returns
-------
str or None
See Also
--------
read_csv
DataFrame.to_delta
DataFrame.to_table
DataFrame.to_parquet
DataFrame.to_spark_io
Examples
--------
>>> df = ps.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df.sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date country code
... 2012-01-31 12:00:00 KR 1
... 2012-02-29 12:00:00 US 2
... 2012-03-31 12:00:00 JP 3
>>> print(df.to_csv()) # doctest: +NORMALIZE_WHITESPACE
date,country,code
2012-01-31 12:00:00,KR,1
2012-02-29 12:00:00,US,2
2012-03-31 12:00:00,JP,3
>>> df.cummax().to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)
>>> ps.read_csv(
... path=r'%s/to_csv/foo.csv' % path
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date country code
... 2012-01-31 12:00:00 KR 1
... 2012-02-29 12:00:00 US 2
... 2012-03-31 12:00:00 US 3
In case of Series,
>>> print(df.date.to_csv()) # doctest: +NORMALIZE_WHITESPACE
date
2012-01-31 12:00:00
2012-02-29 12:00:00
2012-03-31 12:00:00
>>> df.date.to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)
>>> ps.read_csv(
... path=r'%s/to_csv/foo.csv' % path
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date
... 2012-01-31 12:00:00
... 2012-02-29 12:00:00
... 2012-03-31 12:00:00
You can preserve the index in the roundtrip as below.
>>> df.set_index("country", append=True, inplace=True)
>>> df.date.to_csv(
... path=r'%s/to_csv/bar.csv' % path,
... num_files=1,
... index_col=["index1", "index2"])
>>> ps.read_csv(
... path=r'%s/to_csv/bar.csv' % path, index_col=["index1", "index2"]
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date
index1 index2
... ... 2012-01-31 12:00:00
... ... 2012-02-29 12:00:00
... ... 2012-03-31 12:00:00 | python/pyspark/pandas/generic.py | to_csv | XpressAI/spark | python | def to_csv(self, path: Optional[str]=None, sep: str=',', na_rep: str='', columns: Optional[List[Union[(Any, Tuple)]]]=None, header: bool=True, quotechar: str='"', date_format: Optional[str]=None, escapechar: Optional[str]=None, num_files: Optional[int]=None, mode: str='overwrite', partition_cols: Optional[Union[(str, List[str])]]=None, index_col: Optional[Union[(str, List[str])]]=None, **options: Any) -> Optional[str]:
'\n Write object to a comma-separated values (csv) file.\n\n .. note:: pandas-on-Spark `to_csv` writes files to a path or URI. Unlike pandas\',\n pandas-on-Spark respects HDFS\'s property such as \'fs.default.name\'.\n\n .. note:: pandas-on-Spark writes CSV files into the directory, `path`, and writes\n multiple `part-...` files in the directory when `path` is specified.\n This behaviour was inherited from Apache Spark. The number of files can\n be controlled by `num_files`.\n\n Parameters\n ----------\n path : str, default None\n File path. If None is provided the result is returned as a string.\n sep : str, default \',\'\n String of length 1. Field delimiter for the output file.\n na_rep : str, default \'\'\n Missing data representation.\n columns : sequence, optional\n Columns to write.\n header : bool or list of str, default True\n Write out the column names. If a list of strings is given it is\n assumed to be aliases for the column names.\n quotechar : str, default \'\\"\'\n String of length 1. Character used to quote fields.\n date_format : str, default None\n Format string for datetime objects.\n escapechar : str, default None\n String of length 1. Character used to escape `sep` and `quotechar`\n when appropriate.\n num_files : the number of files to be written in `path` directory when\n this is a path.\n mode : str {\'append\', \'overwrite\', \'ignore\', \'error\', \'errorifexists\'},\n default \'overwrite\'. Specifies the behavior of the save operation when the\n destination exists already.\n\n - \'append\': Append the new data to existing data.\n - \'overwrite\': Overwrite existing data.\n - \'ignore\': Silently ignore this operation if data already exists.\n - \'error\' or \'errorifexists\': Throw an exception if data already exists.\n\n partition_cols : str or list of str, optional, default None\n Names of partitioning columns\n index_col: str or list of str, optional, default: None\n Column names to be used in Spark to represent pandas-on-Spark\'s index. The index name\n in pandas-on-Spark is ignored. By default, the index is always lost.\n options: keyword arguments for additional options specific to PySpark.\n This kwargs are specific to PySpark\'s CSV options to pass. Check\n the options in PySpark\'s API documentation for spark.write.csv(...).\n It has higher priority and overwrites all other options.\n This parameter only works when `path` is specified.\n\n Returns\n -------\n str or None\n\n See Also\n --------\n read_csv\n DataFrame.to_delta\n DataFrame.to_table\n DataFrame.to_parquet\n DataFrame.to_spark_io\n\n Examples\n --------\n >>> df = ps.DataFrame(dict(\n ... date=list(pd.date_range(\'2012-1-1 12:00:00\', periods=3, freq=\'M\')),\n ... country=[\'KR\', \'US\', \'JP\'],\n ... code=[1, 2 ,3]), columns=[\'date\', \'country\', \'code\'])\n >>> df.sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE\n date country code\n ... 2012-01-31 12:00:00 KR 1\n ... 2012-02-29 12:00:00 US 2\n ... 2012-03-31 12:00:00 JP 3\n\n >>> print(df.to_csv()) # doctest: +NORMALIZE_WHITESPACE\n date,country,code\n 2012-01-31 12:00:00,KR,1\n 2012-02-29 12:00:00,US,2\n 2012-03-31 12:00:00,JP,3\n\n >>> df.cummax().to_csv(path=r\'%s/to_csv/foo.csv\' % path, num_files=1)\n >>> ps.read_csv(\n ... path=r\'%s/to_csv/foo.csv\' % path\n ... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE\n date country code\n ... 2012-01-31 12:00:00 KR 1\n ... 2012-02-29 12:00:00 US 2\n ... 
2012-03-31 12:00:00 US 3\n\n In case of Series,\n\n >>> print(df.date.to_csv()) # doctest: +NORMALIZE_WHITESPACE\n date\n 2012-01-31 12:00:00\n 2012-02-29 12:00:00\n 2012-03-31 12:00:00\n\n >>> df.date.to_csv(path=r\'%s/to_csv/foo.csv\' % path, num_files=1)\n >>> ps.read_csv(\n ... path=r\'%s/to_csv/foo.csv\' % path\n ... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE\n date\n ... 2012-01-31 12:00:00\n ... 2012-02-29 12:00:00\n ... 2012-03-31 12:00:00\n\n You can preserve the index in the roundtrip as below.\n\n >>> df.set_index("country", append=True, inplace=True)\n >>> df.date.to_csv(\n ... path=r\'%s/to_csv/bar.csv\' % path,\n ... num_files=1,\n ... index_col=["index1", "index2"])\n >>> ps.read_csv(\n ... path=r\'%s/to_csv/bar.csv\' % path, index_col=["index1", "index2"]\n ... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE\n date\n index1 index2\n ... ... 2012-01-31 12:00:00\n ... ... 2012-02-29 12:00:00\n ... ... 2012-03-31 12:00:00\n '
if (('options' in options) and isinstance(options.get('options'), dict) and (len(options) == 1)):
options = options.get('options')
if (path is None):
psdf_or_ser = self
if ((LooseVersion('0.24') > LooseVersion(pd.__version__)) and isinstance(self, ps.Series)):
return psdf_or_ser.to_pandas().to_csv(None, sep=sep, na_rep=na_rep, header=header, date_format=date_format, index=False)
else:
return psdf_or_ser.to_pandas().to_csv(None, sep=sep, na_rep=na_rep, columns=columns, header=header, quotechar=quotechar, date_format=date_format, escapechar=escapechar, index=False)
psdf = self
if isinstance(self, ps.Series):
psdf = self.to_frame()
if (columns is None):
column_labels = psdf._internal.column_labels
else:
column_labels = []
for label in columns:
if (not is_name_like_tuple(label)):
label = (label,)
if (label not in psdf._internal.column_labels):
raise KeyError(name_like_string(label))
column_labels.append(label)
if isinstance(index_col, str):
index_cols = [index_col]
elif (index_col is None):
index_cols = []
else:
index_cols = index_col
if ((header is True) and (psdf._internal.column_labels_level > 1)):
raise ValueError('to_csv only support one-level index column now')
elif isinstance(header, list):
sdf = psdf.to_spark(index_col)
sdf = sdf.select(([scol_for(sdf, name_like_string(label)) for label in index_cols] + [scol_for(sdf, (str(i) if (label is None) else name_like_string(label))).alias(new_name) for (i, (label, new_name)) in enumerate(zip(column_labels, header))]))
header = True
else:
sdf = psdf.to_spark(index_col)
sdf = sdf.select(([scol_for(sdf, name_like_string(label)) for label in index_cols] + [scol_for(sdf, (str(i) if (label is None) else name_like_string(label))) for (i, label) in enumerate(column_labels)]))
if (num_files is not None):
sdf = sdf.repartition(num_files)
builder = sdf.write.mode(mode)
if (partition_cols is not None):
builder.partitionBy(partition_cols)
builder._set_opts(sep=sep, nullValue=na_rep, header=header, quote=quotechar, dateFormat=date_format, charToEscapeQuoteEscaping=escapechar)
builder.options(**options).format('csv').save(path)
return None |
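A to_csv round-trip sketch for the row above; the temporary output directory is an assumption for the example, and num_files=1 keeps Spark from splitting the result into many part-files (local Spark session assumed; data illustrative):

import os
import tempfile
import pyspark.pandas as ps

df = ps.DataFrame({'x': [1, 2, 3], 'y': ['a', 'b', 'c']})
# Without a path the CSV text is returned as a string on the driver.
print(df.to_csv())
# With a path Spark writes a directory of part-files; read it back with ps.read_csv.
out_dir = os.path.join(tempfile.mkdtemp(), 'csv_out')
df.to_csv(path=out_dir, num_files=1)
print(ps.read_csv(out_dir).sort_values(by='x'))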
def to_json(self, path: Optional[str]=None, compression: str='uncompressed', num_files: Optional[int]=None, mode: str='overwrite', orient: str='records', lines: bool=True, partition_cols: Optional[Union[(str, List[str])]]=None, index_col: Optional[Union[(str, List[str])]]=None, **options: Any) -> Optional[str]:
'\n Convert the object to a JSON string.\n\n .. note:: pandas-on-Spark `to_json` writes files to a path or URI. Unlike pandas\',\n pandas-on-Spark respects HDFS\'s property such as \'fs.default.name\'.\n\n .. note:: pandas-on-Spark writes JSON files into the directory, `path`, and writes\n multiple `part-...` files in the directory when `path` is specified.\n This behaviour was inherited from Apache Spark. The number of files can\n be controlled by `num_files`.\n\n .. note:: output JSON format is different from pandas\'. It always use `orient=\'records\'`\n for its output. This behaviour might have to change in the near future.\n\n Note NaN\'s and None will be converted to null and datetime objects\n will be converted to UNIX timestamps.\n\n Parameters\n ----------\n path : string, optional\n File path. If not specified, the result is returned as\n a string.\n lines : bool, default True\n If ‘orient’ is ‘records’ write out line delimited json format.\n Will throw ValueError if incorrect ‘orient’ since others are not\n list like. It should be always True for now.\n orient : str, default \'records\'\n It should be always \'records\' for now.\n compression : {\'gzip\', \'bz2\', \'xz\', None}\n A string representing the compression to use in the output file,\n only used when the first argument is a filename. By default, the\n compression is inferred from the filename.\n num_files : the number of files to be written in `path` directory when\n this is a path.\n mode : str {\'append\', \'overwrite\', \'ignore\', \'error\', \'errorifexists\'},\n default \'overwrite\'. Specifies the behavior of the save operation when the\n destination exists already.\n\n - \'append\': Append the new data to existing data.\n - \'overwrite\': Overwrite existing data.\n - \'ignore\': Silently ignore this operation if data already exists.\n - \'error\' or \'errorifexists\': Throw an exception if data already exists.\n\n partition_cols : str or list of str, optional, default None\n Names of partitioning columns\n index_col: str or list of str, optional, default: None\n Column names to be used in Spark to represent pandas-on-Spark\'s index. The index name\n in pandas-on-Spark is ignored. By default, the index is always lost.\n options: keyword arguments for additional options specific to PySpark.\n It is specific to PySpark\'s JSON options to pass. Check\n the options in PySpark\'s API documentation for `spark.write.json(...)`.\n It has a higher priority and overwrites all other options.\n This parameter only works when `path` is specified.\n\n Returns\n --------\n str or None\n\n Examples\n --------\n >>> df = ps.DataFrame([[\'a\', \'b\'], [\'c\', \'d\']],\n ... columns=[\'col 1\', \'col 2\'])\n >>> df.to_json()\n \'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]\'\n\n >>> df[\'col 1\'].to_json()\n \'[{"col 1":"a"},{"col 1":"c"}]\'\n\n >>> df.to_json(path=r\'%s/to_json/foo.json\' % path, num_files=1)\n >>> ps.read_json(\n ... path=r\'%s/to_json/foo.json\' % path\n ... ).sort_values(by="col 1")\n col 1 col 2\n 0 a b\n 1 c d\n\n >>> df[\'col 1\'].to_json(path=r\'%s/to_json/foo.json\' % path, num_files=1, index_col="index")\n >>> ps.read_json(\n ... path=r\'%s/to_json/foo.json\' % path, index_col="index"\n ... ).sort_values(by="col 1") # doctest: +NORMALIZE_WHITESPACE\n col 1\n index\n 0 a\n 1 c\n '
if (('options' in options) and isinstance(options.get('options'), dict) and (len(options) == 1)):
options = options.get('options')
if (not lines):
raise NotImplementedError('lines=False is not implemented yet.')
if (orient != 'records'):
raise NotImplementedError("orient='records' is supported only for now.")
if (path is None):
psdf_or_ser = self
pdf = psdf_or_ser.to_pandas()
if isinstance(self, ps.Series):
pdf = pdf.to_frame()
return pdf.to_json(orient='records')
psdf = self
if isinstance(self, ps.Series):
psdf = self.to_frame()
sdf = psdf.to_spark(index_col=index_col)
if (num_files is not None):
sdf = sdf.repartition(num_files)
builder = sdf.write.mode(mode)
if (partition_cols is not None):
builder.partitionBy(partition_cols)
builder._set_opts(compression=compression)
builder.options(**options).format('json').save(path)
return None | 4,444,707,189,741,475,000 | Convert the object to a JSON string.
.. note:: pandas-on-Spark `to_json` writes files to a path or URI. Unlike pandas',
pandas-on-Spark respects HDFS's property such as 'fs.default.name'.
.. note:: pandas-on-Spark writes JSON files into the directory, `path`, and writes
multiple `part-...` files in the directory when `path` is specified.
This behaviour was inherited from Apache Spark. The number of files can
be controlled by `num_files`.
.. note:: output JSON format is different from pandas'. It always uses `orient='records'`
for its output. This behaviour might have to change in the near future.
Note NaN's and None will be converted to null and datetime objects
will be converted to UNIX timestamps.
Parameters
----------
path : string, optional
File path. If not specified, the result is returned as
a string.
lines : bool, default True
If ‘orient’ is ‘records’, write out line-delimited JSON format.
Will throw ValueError if ‘orient’ is incorrect, since others are not
list-like. It should always be True for now.
orient : str, default 'records'
It should always be 'records' for now.
compression : {'gzip', 'bz2', 'xz', None}
A string representing the compression to use in the output file,
only used when the first argument is a filename. By default, the
compression is inferred from the filename.
num_files : the number of files to be written in `path` directory when
this is a path.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options: keyword arguments for additional options specific to PySpark.
It is specific to PySpark's JSON options to pass. Check
the options in PySpark's API documentation for `spark.write.json(...)`.
It has a higher priority and overwrites all other options.
This parameter only works when `path` is specified.
Returns
--------
str or None
Examples
--------
>>> df = ps.DataFrame([['a', 'b'], ['c', 'd']],
... columns=['col 1', 'col 2'])
>>> df.to_json()
'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
>>> df['col 1'].to_json()
'[{"col 1":"a"},{"col 1":"c"}]'
>>> df.to_json(path=r'%s/to_json/foo.json' % path, num_files=1)
>>> ps.read_json(
... path=r'%s/to_json/foo.json' % path
... ).sort_values(by="col 1")
col 1 col 2
0 a b
1 c d
>>> df['col 1'].to_json(path=r'%s/to_json/foo.json' % path, num_files=1, index_col="index")
>>> ps.read_json(
... path=r'%s/to_json/foo.json' % path, index_col="index"
... ).sort_values(by="col 1") # doctest: +NORMALIZE_WHITESPACE
col 1
index
0 a
1 c | python/pyspark/pandas/generic.py | to_json | XpressAI/spark | python | def to_json(self, path: Optional[str]=None, compression: str='uncompressed', num_files: Optional[int]=None, mode: str='overwrite', orient: str='records', lines: bool=True, partition_cols: Optional[Union[(str, List[str])]]=None, index_col: Optional[Union[(str, List[str])]]=None, **options: Any) -> Optional[str]:
'\n Convert the object to a JSON string.\n\n .. note:: pandas-on-Spark `to_json` writes files to a path or URI. Unlike pandas\',\n pandas-on-Spark respects HDFS\'s property such as \'fs.default.name\'.\n\n .. note:: pandas-on-Spark writes JSON files into the directory, `path`, and writes\n multiple `part-...` files in the directory when `path` is specified.\n This behaviour was inherited from Apache Spark. The number of files can\n be controlled by `num_files`.\n\n .. note:: output JSON format is different from pandas\'. It always use `orient=\'records\'`\n for its output. This behaviour might have to change in the near future.\n\n Note NaN\'s and None will be converted to null and datetime objects\n will be converted to UNIX timestamps.\n\n Parameters\n ----------\n path : string, optional\n File path. If not specified, the result is returned as\n a string.\n lines : bool, default True\n If ‘orient’ is ‘records’ write out line delimited json format.\n Will throw ValueError if incorrect ‘orient’ since others are not\n list like. It should be always True for now.\n orient : str, default \'records\'\n It should be always \'records\' for now.\n compression : {\'gzip\', \'bz2\', \'xz\', None}\n A string representing the compression to use in the output file,\n only used when the first argument is a filename. By default, the\n compression is inferred from the filename.\n num_files : the number of files to be written in `path` directory when\n this is a path.\n mode : str {\'append\', \'overwrite\', \'ignore\', \'error\', \'errorifexists\'},\n default \'overwrite\'. Specifies the behavior of the save operation when the\n destination exists already.\n\n - \'append\': Append the new data to existing data.\n - \'overwrite\': Overwrite existing data.\n - \'ignore\': Silently ignore this operation if data already exists.\n - \'error\' or \'errorifexists\': Throw an exception if data already exists.\n\n partition_cols : str or list of str, optional, default None\n Names of partitioning columns\n index_col: str or list of str, optional, default: None\n Column names to be used in Spark to represent pandas-on-Spark\'s index. The index name\n in pandas-on-Spark is ignored. By default, the index is always lost.\n options: keyword arguments for additional options specific to PySpark.\n It is specific to PySpark\'s JSON options to pass. Check\n the options in PySpark\'s API documentation for `spark.write.json(...)`.\n It has a higher priority and overwrites all other options.\n This parameter only works when `path` is specified.\n\n Returns\n --------\n str or None\n\n Examples\n --------\n >>> df = ps.DataFrame([[\'a\', \'b\'], [\'c\', \'d\']],\n ... columns=[\'col 1\', \'col 2\'])\n >>> df.to_json()\n \'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]\'\n\n >>> df[\'col 1\'].to_json()\n \'[{"col 1":"a"},{"col 1":"c"}]\'\n\n >>> df.to_json(path=r\'%s/to_json/foo.json\' % path, num_files=1)\n >>> ps.read_json(\n ... path=r\'%s/to_json/foo.json\' % path\n ... ).sort_values(by="col 1")\n col 1 col 2\n 0 a b\n 1 c d\n\n >>> df[\'col 1\'].to_json(path=r\'%s/to_json/foo.json\' % path, num_files=1, index_col="index")\n >>> ps.read_json(\n ... path=r\'%s/to_json/foo.json\' % path, index_col="index"\n ... ).sort_values(by="col 1") # doctest: +NORMALIZE_WHITESPACE\n col 1\n index\n 0 a\n 1 c\n '
if (('options' in options) and isinstance(options.get('options'), dict) and (len(options) == 1)):
options = options.get('options')
if (not lines):
raise NotImplementedError('lines=False is not implemented yet.')
if (orient != 'records'):
raise NotImplementedError("orient='records' is supported only for now.")
if (path is None):
psdf_or_ser = self
pdf = psdf_or_ser.to_pandas()
if isinstance(self, ps.Series):
pdf = pdf.to_frame()
return pdf.to_json(orient='records')
psdf = self
if isinstance(self, ps.Series):
psdf = self.to_frame()
sdf = psdf.to_spark(index_col=index_col)
if (num_files is not None):
sdf = sdf.repartition(num_files)
builder = sdf.write.mode(mode)
if (partition_cols is not None):
builder.partitionBy(partition_cols)
builder._set_opts(compression=compression)
builder.options(**options).format('json').save(path)
return None |
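A minimal sketch of the path=None branch above, using plain pandas only (no Spark needed): the frame is collected to the driver and handed to pandas' to_json(orient='records'). The column names below are illustrative and the StringIO round trip is just for demonstration.

import pandas as pd
from io import StringIO

# Collected-to-driver fallback: records-oriented JSON, the same call the
# pandas-on-Spark code above makes when no path is given.
pdf = pd.DataFrame({"col 1": ["a", "c"], "col 2": ["b", "d"]})
json_str = pdf.to_json(orient="records")
print(json_str)  # [{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]

# Round trip: the records orient keeps rows and columns but drops the index.
restored = pd.read_json(StringIO(json_str), orient="records")
print(restored)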
def to_excel(self, excel_writer: Union[(str, pd.ExcelWriter)], sheet_name: str='Sheet1', na_rep: str='', float_format: Optional[str]=None, columns: Optional[Union[(str, List[str])]]=None, header: bool=True, index: bool=True, index_label: Optional[Union[(str, List[str])]]=None, startrow: int=0, startcol: int=0, engine: Optional[str]=None, merge_cells: bool=True, encoding: Optional[str]=None, inf_rep: str='inf', verbose: bool=True, freeze_panes: Optional[Tuple[(int, int)]]=None) -> None:
'\n Write object to an Excel sheet.\n\n .. note:: This method should only be used if the resulting DataFrame is expected\n to be small, as all the data is loaded into the driver\'s memory.\n\n To write a single object to an Excel .xlsx file it is only necessary to\n specify a target file name. To write to multiple sheets it is necessary to\n create an `ExcelWriter` object with a target file name, and specify a sheet\n in the file to write to.\n\n Multiple sheets may be written to by specifying unique `sheet_name`.\n With all data written to the file it is necessary to save the changes.\n Note that creating an `ExcelWriter` object with a file name that already\n exists will result in the contents of the existing file being erased.\n\n Parameters\n ----------\n excel_writer : str or ExcelWriter object\n File path or existing ExcelWriter.\n sheet_name : str, default \'Sheet1\'\n Name of sheet which will contain DataFrame.\n na_rep : str, default \'\'\n Missing data representation.\n float_format : str, optional\n Format string for floating point numbers. For example\n ``float_format="%%.2f"`` will format 0.1234 to 0.12.\n columns : sequence or list of str, optional\n Columns to write.\n header : bool or list of str, default True\n Write out the column names. If a list of string is given it is\n assumed to be aliases for the column names.\n index : bool, default True\n Write row names (index).\n index_label : str or sequence, optional\n Column label for index column(s) if desired. If not specified, and\n `header` and `index` are True, then the index names are used. A\n sequence should be given if the DataFrame uses MultiIndex.\n startrow : int, default 0\n Upper left cell row to dump data frame.\n startcol : int, default 0\n Upper left cell column to dump data frame.\n engine : str, optional\n Write engine to use, \'openpyxl\' or \'xlsxwriter\'. You can also set this\n via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and\n ``io.excel.xlsm.writer``.\n merge_cells : bool, default True\n Write MultiIndex and Hierarchical Rows as merged cells.\n encoding : str, optional\n Encoding of the resulting excel file. Only necessary for xlwt,\n other writers support unicode natively.\n inf_rep : str, default \'inf\'\n Representation for infinity (there is no native representation for\n infinity in Excel).\n verbose : bool, default True\n Display more information in the error logs.\n freeze_panes : tuple of int (length 2), optional\n Specifies the one-based bottommost row and rightmost column that\n is to be frozen.\n\n Notes\n -----\n Once a workbook has been saved it is not possible write further data\n without rewriting the whole workbook.\n\n See Also\n --------\n read_excel : Read Excel file.\n\n Examples\n --------\n Create, write to and save a workbook:\n\n >>> df1 = ps.DataFrame([[\'a\', \'b\'], [\'c\', \'d\']],\n ... index=[\'row 1\', \'row 2\'],\n ... columns=[\'col 1\', \'col 2\'])\n >>> df1.to_excel("output.xlsx") # doctest: +SKIP\n\n To specify the sheet name:\n\n >>> df1.to_excel("output.xlsx") # doctest: +SKIP\n >>> df1.to_excel("output.xlsx",\n ... sheet_name=\'Sheet_name_1\') # doctest: +SKIP\n\n If you wish to write to more than one sheet in the workbook, it is\n necessary to specify an ExcelWriter object:\n\n >>> with pd.ExcelWriter(\'output.xlsx\') as writer: # doctest: +SKIP\n ... df1.to_excel(writer, sheet_name=\'Sheet_name_1\')\n ... df2.to_excel(writer, sheet_name=\'Sheet_name_2\')\n\n To set the library that is used to write the Excel file,\n you can pass the `engine` keyword (the default engine is\n automatically chosen depending on the file extension):\n\n >>> df1.to_excel(\'output1.xlsx\', engine=\'xlsxwriter\') # doctest: +SKIP\n '
args = locals()
psdf = self
if isinstance(self, ps.DataFrame):
f = pd.DataFrame.to_excel
elif isinstance(self, ps.Series):
f = pd.Series.to_excel
else:
raise TypeError(('Constructor expects DataFrame or Series; however, got [%s]' % (self,)))
return validate_arguments_and_invoke_function(psdf._to_internal_pandas(), self.to_excel, f, args) | 1,914,719,261,915,198,700 | Write object to an Excel sheet.
.. note:: This method should only be used if the resulting DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
To write a single object to an Excel .xlsx file it is only necessary to
specify a target file name. To write to multiple sheets it is necessary to
create an `ExcelWriter` object with a target file name, and specify a sheet
in the file to write to.
Multiple sheets may be written to by specifying unique `sheet_name`.
With all data written to the file it is necessary to save the changes.
Note that creating an `ExcelWriter` object with a file name that already
exists will result in the contents of the existing file being erased.
Parameters
----------
excel_writer : str or ExcelWriter object
File path or existing ExcelWriter.
sheet_name : str, default 'Sheet1'
Name of sheet which will contain DataFrame.
na_rep : str, default ''
Missing data representation.
float_format : str, optional
Format string for floating point numbers. For example
``float_format="%%.2f"`` will format 0.1234 to 0.12.
columns : sequence or list of str, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of string is given it is
assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
index_label : str or sequence, optional
Column label for index column(s) if desired. If not specified, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow : int, default 0
Upper left cell row to dump data frame.
startcol : int, default 0
Upper left cell column to dump data frame.
engine : str, optional
Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this
via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
merge_cells : bool, default True
Write MultiIndex and Hierarchical Rows as merged cells.
encoding : str, optional
Encoding of the resulting excel file. Only necessary for xlwt,
other writers support unicode natively.
inf_rep : str, default 'inf'
Representation for infinity (there is no native representation for
infinity in Excel).
verbose : bool, default True
Display more information in the error logs.
freeze_panes : tuple of int (length 2), optional
Specifies the one-based bottommost row and rightmost column that
is to be frozen.
Notes
-----
Once a workbook has been saved it is not possible write further data
without rewriting the whole workbook.
See Also
--------
read_excel : Read Excel file.
Examples
--------
Create, write to and save a workbook:
>>> df1 = ps.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
To specify the sheet name:
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
>>> df1.to_excel("output.xlsx",
... sheet_name='Sheet_name_1') # doctest: +SKIP
If you wish to write to more than one sheet in the workbook, it is
necessary to specify an ExcelWriter object:
>>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP
... df1.to_excel(writer, sheet_name='Sheet_name_1')
... df2.to_excel(writer, sheet_name='Sheet_name_2')
To set the library that is used to write the Excel file,
you can pass the `engine` keyword (the default engine is
automatically chosen depending on the file extension):
>>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP | python/pyspark/pandas/generic.py | to_excel | XpressAI/spark | python | def to_excel(self, excel_writer: Union[(str, pd.ExcelWriter)], sheet_name: str='Sheet1', na_rep: str='', float_format: Optional[str]=None, columns: Optional[Union[(str, List[str])]]=None, header: bool=True, index: bool=True, index_label: Optional[Union[(str, List[str])]]=None, startrow: int=0, startcol: int=0, engine: Optional[str]=None, merge_cells: bool=True, encoding: Optional[str]=None, inf_rep: str='inf', verbose: bool=True, freeze_panes: Optional[Tuple[(int, int)]]=None) -> None:
'\n Write object to an Excel sheet.\n\n .. note:: This method should only be used if the resulting DataFrame is expected\n to be small, as all the data is loaded into the driver\'s memory.\n\n To write a single object to an Excel .xlsx file it is only necessary to\n specify a target file name. To write to multiple sheets it is necessary to\n create an `ExcelWriter` object with a target file name, and specify a sheet\n in the file to write to.\n\n Multiple sheets may be written to by specifying unique `sheet_name`.\n With all data written to the file it is necessary to save the changes.\n Note that creating an `ExcelWriter` object with a file name that already\n exists will result in the contents of the existing file being erased.\n\n Parameters\n ----------\n excel_writer : str or ExcelWriter object\n File path or existing ExcelWriter.\n sheet_name : str, default \'Sheet1\'\n Name of sheet which will contain DataFrame.\n na_rep : str, default \'\'\n Missing data representation.\n float_format : str, optional\n Format string for floating point numbers. For example\n ``float_format="%%.2f"`` will format 0.1234 to 0.12.\n columns : sequence or list of str, optional\n Columns to write.\n header : bool or list of str, default True\n Write out the column names. If a list of string is given it is\n assumed to be aliases for the column names.\n index : bool, default True\n Write row names (index).\n index_label : str or sequence, optional\n Column label for index column(s) if desired. If not specified, and\n `header` and `index` are True, then the index names are used. A\n sequence should be given if the DataFrame uses MultiIndex.\n startrow : int, default 0\n Upper left cell row to dump data frame.\n startcol : int, default 0\n Upper left cell column to dump data frame.\n engine : str, optional\n Write engine to use, \'openpyxl\' or \'xlsxwriter\'. You can also set this\n via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and\n ``io.excel.xlsm.writer``.\n merge_cells : bool, default True\n Write MultiIndex and Hierarchical Rows as merged cells.\n encoding : str, optional\n Encoding of the resulting excel file. Only necessary for xlwt,\n other writers support unicode natively.\n inf_rep : str, default \'inf\'\n Representation for infinity (there is no native representation for\n infinity in Excel).\n verbose : bool, default True\n Display more information in the error logs.\n freeze_panes : tuple of int (length 2), optional\n Specifies the one-based bottommost row and rightmost column that\n is to be frozen.\n\n Notes\n -----\n Once a workbook has been saved it is not possible write further data\n without rewriting the whole workbook.\n\n See Also\n --------\n read_excel : Read Excel file.\n\n Examples\n --------\n Create, write to and save a workbook:\n\n >>> df1 = ps.DataFrame([[\'a\', \'b\'], [\'c\', \'d\']],\n ... index=[\'row 1\', \'row 2\'],\n ... columns=[\'col 1\', \'col 2\'])\n >>> df1.to_excel("output.xlsx") # doctest: +SKIP\n\n To specify the sheet name:\n\n >>> df1.to_excel("output.xlsx") # doctest: +SKIP\n >>> df1.to_excel("output.xlsx",\n ... sheet_name=\'Sheet_name_1\') # doctest: +SKIP\n\n If you wish to write to more than one sheet in the workbook, it is\n necessary to specify an ExcelWriter object:\n\n >>> with pd.ExcelWriter(\'output.xlsx\') as writer: # doctest: +SKIP\n ... df1.to_excel(writer, sheet_name=\'Sheet_name_1\')\n ... df2.to_excel(writer, sheet_name=\'Sheet_name_2\')\n\n To set the library that is used to write the Excel file,\n you can pass the `engine` keyword (the default engine is\n automatically chosen depending on the file extension):\n\n >>> df1.to_excel(\'output1.xlsx\', engine=\'xlsxwriter\') # doctest: +SKIP\n '
args = locals()
psdf = self
if isinstance(self, ps.DataFrame):
f = pd.DataFrame.to_excel
elif isinstance(self, ps.Series):
f = pd.Series.to_excel
else:
raise TypeError(('Constructor expects DataFrame or Series; however, got [%s]' % (self,)))
return validate_arguments_and_invoke_function(psdf._to_internal_pandas(), self.to_excel, f, args) |
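The method above collects the (assumed small) frame to pandas with _to_internal_pandas() and forwards the captured locals() to pandas' own to_excel via validate_arguments_and_invoke_function. Below is a rough sketch of that forwarding idea only; forward_to_pandas is a hypothetical helper, far simpler than the real one, and to_csv is used so the demo runs without an Excel engine installed.

import inspect
import pandas as pd

def forward_to_pandas(pandas_obj, pandas_func, captured_args):
    # Keep only the arguments the target pandas function actually accepts,
    # mirroring the idea of filtering the captured locals() before delegating.
    accepted = set(inspect.signature(pandas_func).parameters) - {"self"}
    kwargs = {k: v for k, v in captured_args.items() if k in accepted}
    return pandas_func(pandas_obj, **kwargs)

pdf = pd.DataFrame({"col 1": ["a", "c"], "col 2": ["b", "d"]})
print(forward_to_pandas(pdf, pd.DataFrame.to_csv, {"index": False, "sep": ";"}))
# to_excel is delegated the same way but needs an engine such as openpyxl:
# forward_to_pandas(pdf, pd.DataFrame.to_excel, {"excel_writer": "output.xlsx"})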
def mean(self, axis: Optional[Axis]=None, numeric_only: bool=None) -> Union[(Scalar, 'Series')]:
"\n Return the mean of the values.\n\n Parameters\n ----------\n axis : {index (0), columns (1)}\n Axis for the function to be applied on.\n numeric_only : bool, default None\n Include only float, int, boolean columns. False is not supported. This parameter\n is mainly for pandas compatibility.\n\n Returns\n -------\n mean : scalar for a Series, and a Series for a DataFrame.\n\n Examples\n --------\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},\n ... columns=['a', 'b'])\n\n On a DataFrame:\n\n >>> df.mean()\n a 2.0\n b 0.2\n dtype: float64\n\n >>> df.mean(axis=1)\n 0 0.55\n 1 1.10\n 2 1.65\n 3 NaN\n dtype: float64\n\n On a Series:\n\n >>> df['a'].mean()\n 2.0\n "
axis = validate_axis(axis)
if ((numeric_only is None) and (axis == 0)):
numeric_only = True
def mean(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif (not isinstance(spark_type, NumericType)):
raise TypeError('Could not convert {} ({}) to numeric'.format(spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()))
return F.mean(spark_column)
return self._reduce_for_stat_function(mean, name='mean', axis=axis, numeric_only=numeric_only) | -7,254,371,689,763,669,000 | Return the mean of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
mean : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.mean()
a 2.0
b 0.2
dtype: float64
>>> df.mean(axis=1)
0 0.55
1 1.10
2 1.65
3 NaN
dtype: float64
On a Series:
>>> df['a'].mean()
2.0 | python/pyspark/pandas/generic.py | mean | XpressAI/spark | python | def mean(self, axis: Optional[Axis]=None, numeric_only: bool=None) -> Union[(Scalar, 'Series')]:
"\n Return the mean of the values.\n\n Parameters\n ----------\n axis : {index (0), columns (1)}\n Axis for the function to be applied on.\n numeric_only : bool, default None\n Include only float, int, boolean columns. False is not supported. This parameter\n is mainly for pandas compatibility.\n\n Returns\n -------\n mean : scalar for a Series, and a Series for a DataFrame.\n\n Examples\n --------\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},\n ... columns=['a', 'b'])\n\n On a DataFrame:\n\n >>> df.mean()\n a 2.0\n b 0.2\n dtype: float64\n\n >>> df.mean(axis=1)\n 0 0.55\n 1 1.10\n 2 1.65\n 3 NaN\n dtype: float64\n\n On a Series:\n\n >>> df['a'].mean()\n 2.0\n "
axis = validate_axis(axis)
if ((numeric_only is None) and (axis == 0)):
numeric_only = True
def mean(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif (not isinstance(spark_type, NumericType)):
raise TypeError('Could not convert {} ({}) to numeric'.format(spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()))
return F.mean(spark_column)
return self._reduce_for_stat_function(mean, name='mean', axis=axis, numeric_only=numeric_only) |
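A plain-PySpark sketch of the cast done in mean() above: boolean columns are averaged as 0/1 after casting to long. It assumes pyspark is installed and a local SparkSession can be started; the data is illustrative.

from pyspark.sql import SparkSession, functions as F
from pyspark.sql.types import LongType

spark = SparkSession.builder.master("local[1]").getOrCreate()
sdf = spark.createDataFrame([(1.0, True), (2.0, False), (3.0, True)], ["a", "flag"])

# 'flag' is cast to 0/1 before averaging, as in the reduction above.
sdf.select(
    F.mean("a").alias("mean_a"),
    F.mean(F.col("flag").cast(LongType())).alias("mean_flag"),
).show()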
def sum(self, axis: Optional[Axis]=None, numeric_only: bool=None, min_count: int=0) -> Union[(Scalar, 'Series')]:
"\n Return the sum of the values.\n\n Parameters\n ----------\n axis : {index (0), columns (1)}\n Axis for the function to be applied on.\n numeric_only : bool, default None\n Include only float, int, boolean columns. False is not supported. This parameter\n is mainly for pandas compatibility.\n min_count : int, default 0\n The required number of valid values to perform the operation. If fewer than\n ``min_count`` non-NA values are present the result will be NA.\n\n Returns\n -------\n sum : scalar for a Series, and a Series for a DataFrame.\n\n Examples\n --------\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, np.nan, 0.3, np.nan]},\n ... columns=['a', 'b'])\n\n On a DataFrame:\n\n >>> df.sum()\n a 6.0\n b 0.4\n dtype: float64\n\n >>> df.sum(axis=1)\n 0 1.1\n 1 2.0\n 2 3.3\n 3 0.0\n dtype: float64\n\n >>> df.sum(min_count=3)\n a 6.0\n b NaN\n dtype: float64\n\n >>> df.sum(axis=1, min_count=1)\n 0 1.1\n 1 2.0\n 2 3.3\n 3 NaN\n dtype: float64\n\n On a Series:\n\n >>> df['a'].sum()\n 6.0\n\n >>> df['a'].sum(min_count=3)\n 6.0\n >>> df['b'].sum(min_count=3)\n nan\n "
axis = validate_axis(axis)
if ((numeric_only is None) and (axis == 0)):
numeric_only = True
elif ((numeric_only is True) and (axis == 1)):
numeric_only = None
def sum(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif (not isinstance(spark_type, NumericType)):
raise TypeError('Could not convert {} ({}) to numeric'.format(spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()))
return F.coalesce(F.sum(spark_column), SF.lit(0))
return self._reduce_for_stat_function(sum, name='sum', axis=axis, numeric_only=numeric_only, min_count=min_count) | 4,394,613,206,444,410,000 | Return the sum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
Returns
-------
sum : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, np.nan, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.sum()
a 6.0
b 0.4
dtype: float64
>>> df.sum(axis=1)
0 1.1
1 2.0
2 3.3
3 0.0
dtype: float64
>>> df.sum(min_count=3)
a 6.0
b NaN
dtype: float64
>>> df.sum(axis=1, min_count=1)
0 1.1
1 2.0
2 3.3
3 NaN
dtype: float64
On a Series:
>>> df['a'].sum()
6.0
>>> df['a'].sum(min_count=3)
6.0
>>> df['b'].sum(min_count=3)
nan | python/pyspark/pandas/generic.py | sum | XpressAI/spark | python | def sum(self, axis: Optional[Axis]=None, numeric_only: bool=None, min_count: int=0) -> Union[(Scalar, 'Series')]:
"\n Return the sum of the values.\n\n Parameters\n ----------\n axis : {index (0), columns (1)}\n Axis for the function to be applied on.\n numeric_only : bool, default None\n Include only float, int, boolean columns. False is not supported. This parameter\n is mainly for pandas compatibility.\n min_count : int, default 0\n The required number of valid values to perform the operation. If fewer than\n ``min_count`` non-NA values are present the result will be NA.\n\n Returns\n -------\n sum : scalar for a Series, and a Series for a DataFrame.\n\n Examples\n --------\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, np.nan, 0.3, np.nan]},\n ... columns=['a', 'b'])\n\n On a DataFrame:\n\n >>> df.sum()\n a 6.0\n b 0.4\n dtype: float64\n\n >>> df.sum(axis=1)\n 0 1.1\n 1 2.0\n 2 3.3\n 3 0.0\n dtype: float64\n\n >>> df.sum(min_count=3)\n a 6.0\n b NaN\n dtype: float64\n\n >>> df.sum(axis=1, min_count=1)\n 0 1.1\n 1 2.0\n 2 3.3\n 3 NaN\n dtype: float64\n\n On a Series:\n\n >>> df['a'].sum()\n 6.0\n\n >>> df['a'].sum(min_count=3)\n 6.0\n >>> df['b'].sum(min_count=3)\n nan\n "
axis = validate_axis(axis)
if ((numeric_only is None) and (axis == 0)):
numeric_only = True
elif ((numeric_only is True) and (axis == 1)):
numeric_only = None
def sum(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif (not isinstance(spark_type, NumericType)):
raise TypeError('Could not convert {} ({}) to numeric'.format(spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()))
return F.coalesce(F.sum(spark_column), SF.lit(0))
return self._reduce_for_stat_function(sum, name='sum', axis=axis, numeric_only=numeric_only, min_count=min_count) |
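The F.coalesce(F.sum(...), lit(0)) above exists because Spark's sum over only nulls is null, whereas pandas' default sum of an all-NA column is 0; the min_count argument appears to be applied afterwards by the shared reduction helper. A plain-PySpark illustration, assuming a local SparkSession can be started:

from pyspark.sql import SparkSession, functions as F

spark = SparkSession.builder.master("local[1]").getOrCreate()
sdf = spark.createDataFrame([(None,), (None,)], "b double")

sdf.select(
    F.sum("b").alias("plain_sum"),                               # NULL
    F.coalesce(F.sum("b"), F.lit(0)).alias("pandas_like_sum"),   # 0
).show()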
def product(self, axis: Optional[Axis]=None, numeric_only: bool=None, min_count: int=0) -> Union[(Scalar, 'Series')]:
'\n Return the product of the values.\n\n .. note:: unlike pandas\', pandas-on-Spark\'s emulates product by ``exp(sum(log(...)))``\n trick. Therefore, it only works for positive numbers.\n\n Parameters\n ----------\n axis : {index (0), columns (1)}\n Axis for the function to be applied on.\n numeric_only : bool, default None\n Include only float, int, boolean columns. False is not supported. This parameter\n is mainly for pandas compatibility.\n min_count : int, default 0\n The required number of valid values to perform the operation. If fewer than\n ``min_count`` non-NA values are present the result will be NA.\n\n Examples\n --------\n On a DataFrame:\n\n Non-numeric type column is not included to the result.\n\n >>> psdf = ps.DataFrame({\'A\': [1, 2, 3, 4, 5],\n ... \'B\': [10, 20, 30, 40, 50],\n ... \'C\': [\'a\', \'b\', \'c\', \'d\', \'e\']})\n >>> psdf\n A B C\n 0 1 10 a\n 1 2 20 b\n 2 3 30 c\n 3 4 40 d\n 4 5 50 e\n\n >>> psdf.prod()\n A 120\n B 12000000\n dtype: int64\n\n If there is no numeric type columns, returns empty Series.\n\n >>> ps.DataFrame({"key": [\'a\', \'b\', \'c\'], "val": [\'x\', \'y\', \'z\']}).prod()\n Series([], dtype: float64)\n\n On a Series:\n\n >>> ps.Series([1, 2, 3, 4, 5]).prod()\n 120\n\n By default, the product of an empty or all-NA Series is ``1``\n\n >>> ps.Series([]).prod()\n 1.0\n\n This can be controlled with the ``min_count`` parameter\n\n >>> ps.Series([]).prod(min_count=1)\n nan\n '
axis = validate_axis(axis)
if ((numeric_only is None) and (axis == 0)):
numeric_only = True
elif ((numeric_only is True) and (axis == 1)):
numeric_only = None
def prod(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
scol = F.min(F.coalesce(spark_column, SF.lit(True))).cast(LongType())
elif isinstance(spark_type, NumericType):
num_zeros = F.sum(F.when((spark_column == 0), 1).otherwise(0))
sign = F.when(((F.sum(F.when((spark_column < 0), 1).otherwise(0)) % 2) == 0), 1).otherwise((- 1))
scol = F.when((num_zeros > 0), 0).otherwise((sign * F.exp(F.sum(F.log(F.abs(spark_column))))))
if isinstance(spark_type, IntegralType):
scol = F.round(scol).cast(LongType())
else:
raise TypeError('Could not convert {} ({}) to numeric'.format(spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()))
return F.coalesce(scol, SF.lit(1))
return self._reduce_for_stat_function(prod, name='prod', axis=axis, numeric_only=numeric_only, min_count=min_count) | 4,500,819,481,037,292,500 | Return the product of the values.
.. note:: unlike pandas', pandas-on-Spark's emulates product by ``exp(sum(log(...)))``
trick. Therefore, it only works for positive numbers.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
Examples
--------
On a DataFrame:
Non-numeric type column is not included to the result.
>>> psdf = ps.DataFrame({'A': [1, 2, 3, 4, 5],
... 'B': [10, 20, 30, 40, 50],
... 'C': ['a', 'b', 'c', 'd', 'e']})
>>> psdf
A B C
0 1 10 a
1 2 20 b
2 3 30 c
3 4 40 d
4 5 50 e
>>> psdf.prod()
A 120
B 12000000
dtype: int64
If there is no numeric type columns, returns empty Series.
>>> ps.DataFrame({"key": ['a', 'b', 'c'], "val": ['x', 'y', 'z']}).prod()
Series([], dtype: float64)
On a Series:
>>> ps.Series([1, 2, 3, 4, 5]).prod()
120
By default, the product of an empty or all-NA Series is ``1``
>>> ps.Series([]).prod()
1.0
This can be controlled with the ``min_count`` parameter
>>> ps.Series([]).prod(min_count=1)
nan | python/pyspark/pandas/generic.py | product | XpressAI/spark | python | def product(self, axis: Optional[Axis]=None, numeric_only: bool=None, min_count: int=0) -> Union[(Scalar, 'Series')]:
'\n Return the product of the values.\n\n .. note:: unlike pandas\', pandas-on-Spark\'s emulates product by ``exp(sum(log(...)))``\n trick. Therefore, it only works for positive numbers.\n\n Parameters\n ----------\n axis : {index (0), columns (1)}\n Axis for the function to be applied on.\n numeric_only : bool, default None\n Include only float, int, boolean columns. False is not supported. This parameter\n is mainly for pandas compatibility.\n min_count : int, default 0\n The required number of valid values to perform the operation. If fewer than\n ``min_count`` non-NA values are present the result will be NA.\n\n Examples\n --------\n On a DataFrame:\n\n Non-numeric type column is not included to the result.\n\n >>> psdf = ps.DataFrame({\'A\': [1, 2, 3, 4, 5],\n ... \'B\': [10, 20, 30, 40, 50],\n ... \'C\': [\'a\', \'b\', \'c\', \'d\', \'e\']})\n >>> psdf\n A B C\n 0 1 10 a\n 1 2 20 b\n 2 3 30 c\n 3 4 40 d\n 4 5 50 e\n\n >>> psdf.prod()\n A 120\n B 12000000\n dtype: int64\n\n If there is no numeric type columns, returns empty Series.\n\n >>> ps.DataFrame({"key": [\'a\', \'b\', \'c\'], "val": [\'x\', \'y\', \'z\']}).prod()\n Series([], dtype: float64)\n\n On a Series:\n\n >>> ps.Series([1, 2, 3, 4, 5]).prod()\n 120\n\n By default, the product of an empty or all-NA Series is ``1``\n\n >>> ps.Series([]).prod()\n 1.0\n\n This can be controlled with the ``min_count`` parameter\n\n >>> ps.Series([]).prod(min_count=1)\n nan\n '
axis = validate_axis(axis)
if ((numeric_only is None) and (axis == 0)):
numeric_only = True
elif ((numeric_only is True) and (axis == 1)):
numeric_only = None
def prod(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
scol = F.min(F.coalesce(spark_column, SF.lit(True))).cast(LongType())
elif isinstance(spark_type, NumericType):
num_zeros = F.sum(F.when((spark_column == 0), 1).otherwise(0))
sign = F.when(((F.sum(F.when((spark_column < 0), 1).otherwise(0)) % 2) == 0), 1).otherwise((- 1))
scol = F.when((num_zeros > 0), 0).otherwise((sign * F.exp(F.sum(F.log(F.abs(spark_column))))))
if isinstance(spark_type, IntegralType):
scol = F.round(scol).cast(LongType())
else:
raise TypeError('Could not convert {} ({}) to numeric'.format(spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()))
return F.coalesce(scol, SF.lit(1))
return self._reduce_for_stat_function(prod, name='prod', axis=axis, numeric_only=numeric_only, min_count=min_count) |
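Spark has no product aggregate, so the expression above rebuilds one from exp(sum(log|x|)) plus separate bookkeeping for zeros and for the sign (the docstring's positive-numbers caveat refers to the underlying log trick). A plain-Python check of that identity, no Spark required; spark_style_product is just a name for this sketch.

import math

def spark_style_product(values):
    # Zeros short-circuit to 0; the sign comes from the parity of negatives;
    # the magnitude comes from exp(sum(log|x|)) -- the same pieces as above.
    if any(v == 0 for v in values):
        return 0.0
    sign = -1.0 if sum(1 for v in values if v < 0) % 2 else 1.0
    return sign * math.exp(sum(math.log(abs(v)) for v in values))

print(spark_style_product([1, 2, 3, 4, 5]))  # ~120.0
print(spark_style_product([-2, 3, 4]))       # ~-24.0
print(spark_style_product([7, 0, 9]))        # 0.0
print(math.prod([1, 2, 3, 4, 5]))            # 120, exact reference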
def skew(self, axis: Optional[Axis]=None, numeric_only: bool=None) -> Union[(Scalar, 'Series')]:
"\n Return unbiased skew normalized by N-1.\n\n Parameters\n ----------\n axis : {index (0), columns (1)}\n Axis for the function to be applied on.\n numeric_only : bool, default None\n Include only float, int, boolean columns. False is not supported. This parameter\n is mainly for pandas compatibility.\n\n Returns\n -------\n skew : scalar for a Series, and a Series for a DataFrame.\n\n Examples\n --------\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},\n ... columns=['a', 'b'])\n\n On a DataFrame:\n\n >>> df.skew() # doctest: +SKIP\n a 0.000000e+00\n b -3.319678e-16\n dtype: float64\n\n On a Series:\n\n >>> df['a'].skew()\n 0.0\n "
axis = validate_axis(axis)
if ((numeric_only is None) and (axis == 0)):
numeric_only = True
def skew(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif (not isinstance(spark_type, NumericType)):
raise TypeError('Could not convert {} ({}) to numeric'.format(spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()))
return F.skewness(spark_column)
return self._reduce_for_stat_function(skew, name='skew', axis=axis, numeric_only=numeric_only) | 7,805,550,396,065,647,000 | Return unbiased skew normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
skew : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.skew() # doctest: +SKIP
a 0.000000e+00
b -3.319678e-16
dtype: float64
On a Series:
>>> df['a'].skew()
0.0 | python/pyspark/pandas/generic.py | skew | XpressAI/spark | python | def skew(self, axis: Optional[Axis]=None, numeric_only: bool=None) -> Union[(Scalar, 'Series')]:
"\n Return unbiased skew normalized by N-1.\n\n Parameters\n ----------\n axis : {index (0), columns (1)}\n Axis for the function to be applied on.\n numeric_only : bool, default None\n Include only float, int, boolean columns. False is not supported. This parameter\n is mainly for pandas compatibility.\n\n Returns\n -------\n skew : scalar for a Series, and a Series for a DataFrame.\n\n Examples\n --------\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},\n ... columns=['a', 'b'])\n\n On a DataFrame:\n\n >>> df.skew() # doctest: +SKIP\n a 0.000000e+00\n b -3.319678e-16\n dtype: float64\n\n On a Series:\n\n >>> df['a'].skew()\n 0.0\n "
axis = validate_axis(axis)
if ((numeric_only is None) and (axis == 0)):
numeric_only = True
def skew(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif (not isinstance(spark_type, NumericType)):
raise TypeError('Could not convert {} ({}) to numeric'.format(spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()))
return F.skewness(spark_column)
return self._reduce_for_stat_function(skew, name='skew', axis=axis, numeric_only=numeric_only) |
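The doctest above is marked # doctest: +SKIP because F.skewness is Spark's population-style estimator, which can differ slightly (or by float noise) from pandas' bias-corrected skew. A side-by-side check, assuming pyspark and pandas are installed and a local SparkSession can be created; the values are illustrative.

import pandas as pd
from pyspark.sql import SparkSession, functions as F

pdf = pd.DataFrame({"x": [1.0, 2.0, 4.0, 8.0, 16.0]})

spark = SparkSession.builder.master("local[1]").getOrCreate()
sdf = spark.createDataFrame(pdf)

print("pandas skew   :", pdf["x"].skew())                         # bias-corrected
print("spark skewness:", sdf.select(F.skewness("x")).first()[0])  # population-style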
def kurtosis(self, axis: Optional[Axis]=None, numeric_only: bool=None) -> Union[(Scalar, 'Series')]:
"\n Return unbiased kurtosis using Fisher’s definition of kurtosis (kurtosis of normal == 0.0).\n Normalized by N-1.\n\n Parameters\n ----------\n axis : {index (0), columns (1)}\n Axis for the function to be applied on.\n numeric_only : bool, default None\n Include only float, int, boolean columns. False is not supported. This parameter\n is mainly for pandas compatibility.\n\n Returns\n -------\n kurt : scalar for a Series, and a Series for a DataFrame.\n\n Examples\n --------\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},\n ... columns=['a', 'b'])\n\n On a DataFrame:\n\n >>> df.kurtosis()\n a -1.5\n b -1.5\n dtype: float64\n\n On a Series:\n\n >>> df['a'].kurtosis()\n -1.5\n "
axis = validate_axis(axis)
if ((numeric_only is None) and (axis == 0)):
numeric_only = True
def kurtosis(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif (not isinstance(spark_type, NumericType)):
raise TypeError('Could not convert {} ({}) to numeric'.format(spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()))
return F.kurtosis(spark_column)
return self._reduce_for_stat_function(kurtosis, name='kurtosis', axis=axis, numeric_only=numeric_only) | -2,364,789,027,313,932,300 | Return unbiased kurtosis using Fisher’s definition of kurtosis (kurtosis of normal == 0.0).
Normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
kurt : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.kurtosis()
a -1.5
b -1.5
dtype: float64
On a Series:
>>> df['a'].kurtosis()
-1.5 | python/pyspark/pandas/generic.py | kurtosis | XpressAI/spark | python | def kurtosis(self, axis: Optional[Axis]=None, numeric_only: bool=None) -> Union[(Scalar, 'Series')]:
"\n Return unbiased kurtosis using Fisher’s definition of kurtosis (kurtosis of normal == 0.0).\n Normalized by N-1.\n\n Parameters\n ----------\n axis : {index (0), columns (1)}\n Axis for the function to be applied on.\n numeric_only : bool, default None\n Include only float, int, boolean columns. False is not supported. This parameter\n is mainly for pandas compatibility.\n\n Returns\n -------\n kurt : scalar for a Series, and a Series for a DataFrame.\n\n Examples\n --------\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},\n ... columns=['a', 'b'])\n\n On a DataFrame:\n\n >>> df.kurtosis()\n a -1.5\n b -1.5\n dtype: float64\n\n On a Series:\n\n >>> df['a'].kurtosis()\n -1.5\n "
axis = validate_axis(axis)
if ((numeric_only is None) and (axis == 0)):
numeric_only = True
def kurtosis(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif (not isinstance(spark_type, NumericType)):
raise TypeError('Could not convert {} ({}) to numeric'.format(spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()))
return F.kurtosis(spark_column)
return self._reduce_for_stat_function(kurtosis, name='kurtosis', axis=axis, numeric_only=numeric_only) |
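The -1.5 in the docstring matches the population (excess) definition that Spark's kurtosis aggregate appears to use; pandas' own Series.kurtosis applies a small-sample correction and would return NaN for only three values. A numpy-only check of the headline number:

import numpy as np

x = np.array([1.0, 2.0, 3.0])
d = x - x.mean()
m2 = (d ** 2).mean()
m4 = (d ** 4).mean()
print(m4 / m2 ** 2 - 3)   # -1.5, matching df['a'].kurtosis() above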
def min(self, axis: Optional[Axis]=None, numeric_only: bool=None) -> Union[(Scalar, 'Series')]:
"\n Return the minimum of the values.\n\n Parameters\n ----------\n axis : {index (0), columns (1)}\n Axis for the function to be applied on.\n numeric_only : bool, default None\n If True, include only float, int, boolean columns. This parameter is mainly for\n pandas compatibility. False is supported; however, the columns should\n be all numeric or all non-numeric.\n\n Returns\n -------\n min : scalar for a Series, and a Series for a DataFrame.\n\n Examples\n --------\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},\n ... columns=['a', 'b'])\n\n On a DataFrame:\n\n >>> df.min()\n a 1.0\n b 0.1\n dtype: float64\n\n >>> df.min(axis=1)\n 0 0.1\n 1 0.2\n 2 0.3\n 3 NaN\n dtype: float64\n\n On a Series:\n\n >>> df['a'].min()\n 1.0\n "
axis = validate_axis(axis)
if ((numeric_only is None) and (axis == 0)):
numeric_only = True
elif ((numeric_only is True) and (axis == 1)):
numeric_only = None
return self._reduce_for_stat_function(F.min, name='min', axis=axis, numeric_only=numeric_only) | 398,092,958,807,587,700 | Return the minimum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility. False is supported; however, the columns should
be all numeric or all non-numeric.
Returns
-------
min : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.min()
a 1.0
b 0.1
dtype: float64
>>> df.min(axis=1)
0 0.1
1 0.2
2 0.3
3 NaN
dtype: float64
On a Series:
>>> df['a'].min()
1.0 | python/pyspark/pandas/generic.py | min | XpressAI/spark | python | def min(self, axis: Optional[Axis]=None, numeric_only: bool=None) -> Union[(Scalar, 'Series')]:
"\n Return the minimum of the values.\n\n Parameters\n ----------\n axis : {index (0), columns (1)}\n Axis for the function to be applied on.\n numeric_only : bool, default None\n If True, include only float, int, boolean columns. This parameter is mainly for\n pandas compatibility. False is supported; however, the columns should\n be all numeric or all non-numeric.\n\n Returns\n -------\n min : scalar for a Series, and a Series for a DataFrame.\n\n Examples\n --------\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},\n ... columns=['a', 'b'])\n\n On a DataFrame:\n\n >>> df.min()\n a 1.0\n b 0.1\n dtype: float64\n\n >>> df.min(axis=1)\n 0 0.1\n 1 0.2\n 2 0.3\n 3 NaN\n dtype: float64\n\n On a Series:\n\n >>> df['a'].min()\n 1.0\n "
axis = validate_axis(axis)
if ((numeric_only is None) and (axis == 0)):
numeric_only = True
elif ((numeric_only is True) and (axis == 1)):
numeric_only = None
return self._reduce_for_stat_function(F.min, name='min', axis=axis, numeric_only=numeric_only) |
def max(self, axis: Optional[Axis]=None, numeric_only: bool=None) -> Union[(Scalar, 'Series')]:
"\n Return the maximum of the values.\n\n Parameters\n ----------\n axis : {index (0), columns (1)}\n Axis for the function to be applied on.\n numeric_only : bool, default None\n If True, include only float, int, boolean columns. This parameter is mainly for\n pandas compatibility. False is supported; however, the columns should\n be all numeric or all non-numeric.\n\n Returns\n -------\n max : scalar for a Series, and a Series for a DataFrame.\n\n Examples\n --------\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},\n ... columns=['a', 'b'])\n\n On a DataFrame:\n\n >>> df.max()\n a 3.0\n b 0.3\n dtype: float64\n\n >>> df.max(axis=1)\n 0 1.0\n 1 2.0\n 2 3.0\n 3 NaN\n dtype: float64\n\n On a Series:\n\n >>> df['a'].max()\n 3.0\n "
axis = validate_axis(axis)
if ((numeric_only is None) and (axis == 0)):
numeric_only = True
elif ((numeric_only is True) and (axis == 1)):
numeric_only = None
return self._reduce_for_stat_function(F.max, name='max', axis=axis, numeric_only=numeric_only) | -8,641,553,948,083,875,000 | Return the maximum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility. False is supported; however, the columns should
be all numeric or all non-numeric.
Returns
-------
max : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.max()
a 3.0
b 0.3
dtype: float64
>>> df.max(axis=1)
0 1.0
1 2.0
2 3.0
3 NaN
dtype: float64
On a Series:
>>> df['a'].max()
3.0 | python/pyspark/pandas/generic.py | max | XpressAI/spark | python | def max(self, axis: Optional[Axis]=None, numeric_only: bool=None) -> Union[(Scalar, 'Series')]:
"\n Return the maximum of the values.\n\n Parameters\n ----------\n axis : {index (0), columns (1)}\n Axis for the function to be applied on.\n numeric_only : bool, default None\n If True, include only float, int, boolean columns. This parameter is mainly for\n pandas compatibility. False is supported; however, the columns should\n be all numeric or all non-numeric.\n\n Returns\n -------\n max : scalar for a Series, and a Series for a DataFrame.\n\n Examples\n --------\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},\n ... columns=['a', 'b'])\n\n On a DataFrame:\n\n >>> df.max()\n a 3.0\n b 0.3\n dtype: float64\n\n >>> df.max(axis=1)\n 0 1.0\n 1 2.0\n 2 3.0\n 3 NaN\n dtype: float64\n\n On a Series:\n\n >>> df['a'].max()\n 3.0\n "
axis = validate_axis(axis)
if ((numeric_only is None) and (axis == 0)):
numeric_only = True
elif ((numeric_only is True) and (axis == 1)):
numeric_only = None
return self._reduce_for_stat_function(F.max, name='max', axis=axis, numeric_only=numeric_only) |
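min and max pass F.min / F.max straight through, and those aggregates also order strings and dates, which is why numeric_only=False is accepted as long as the columns are uniformly numeric or uniformly non-numeric. A plain-PySpark illustration, assuming a local SparkSession:

from pyspark.sql import SparkSession, functions as F

spark = SparkSession.builder.master("local[1]").getOrCreate()
sdf = spark.createDataFrame([("x",), ("y",), ("z",)], ["s"])

sdf.select(F.min("s").alias("min_s"), F.max("s").alias("max_s")).show()
# min_s = x, max_s = z -- lexicographic ordering on strings.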
def count(self, axis: Optional[Axis]=None, numeric_only: bool=False) -> Union[(Scalar, 'Series')]:
'\n Count non-NA cells for each column.\n\n The values `None`, `NaN` are considered NA.\n\n Parameters\n ----------\n axis : {0 or ‘index’, 1 or ‘columns’}, default 0\n If 0 or ‘index’ counts are generated for each column. If 1 or ‘columns’ counts are\n generated for each row.\n numeric_only : bool, default False\n If True, include only float, int, boolean columns. This parameter is mainly for\n pandas compatibility.\n\n Returns\n -------\n max : scalar for a Series, and a Series for a DataFrame.\n\n See Also\n --------\n DataFrame.shape: Number of DataFrame rows and columns (including NA\n elements).\n DataFrame.isna: Boolean same-sized DataFrame showing places of NA\n elements.\n\n Examples\n --------\n Constructing DataFrame from a dictionary:\n\n >>> df = ps.DataFrame({"Person":\n ... ["John", "Myla", "Lewis", "John", "Myla"],\n ... "Age": [24., np.nan, 21., 33, 26],\n ... "Single": [False, True, True, True, False]},\n ... columns=["Person", "Age", "Single"])\n >>> df\n Person Age Single\n 0 John 24.0 False\n 1 Myla NaN True\n 2 Lewis 21.0 True\n 3 John 33.0 True\n 4 Myla 26.0 False\n\n Notice the uncounted NA values:\n\n >>> df.count()\n Person 5\n Age 4\n Single 5\n dtype: int64\n\n >>> df.count(axis=1)\n 0 3\n 1 2\n 2 3\n 3 3\n 4 3\n dtype: int64\n\n On a Series:\n\n >>> df[\'Person\'].count()\n 5\n\n >>> df[\'Age\'].count()\n 4\n '
return self._reduce_for_stat_function(Frame._count_expr, name='count', axis=axis, numeric_only=numeric_only) | 7,315,654,646,070,643,000 | Count non-NA cells for each column.
The values `None`, `NaN` are considered NA.
Parameters
----------
axis : {0 or ‘index’, 1 or ‘columns’}, default 0
If 0 or ‘index’ counts are generated for each column. If 1 or ‘columns’ counts are
generated for each row.
numeric_only : bool, default False
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility.
Returns
-------
max : scalar for a Series, and a Series for a DataFrame.
See Also
--------
DataFrame.shape: Number of DataFrame rows and columns (including NA
elements).
DataFrame.isna: Boolean same-sized DataFrame showing places of NA
elements.
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = ps.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]},
... columns=["Person", "Age", "Single"])
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 Lewis 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
>>> df.count(axis=1)
0 3
1 2
2 3
3 3
4 3
dtype: int64
On a Series:
>>> df['Person'].count()
5
>>> df['Age'].count()
4 | python/pyspark/pandas/generic.py | count | XpressAI/spark | python | def count(self, axis: Optional[Axis]=None, numeric_only: bool=False) -> Union[(Scalar, 'Series')]:
'\n Count non-NA cells for each column.\n\n The values `None`, `NaN` are considered NA.\n\n Parameters\n ----------\n axis : {0 or ‘index’, 1 or ‘columns’}, default 0\n If 0 or ‘index’ counts are generated for each column. If 1 or ‘columns’ counts are\n generated for each row.\n numeric_only : bool, default False\n If True, include only float, int, boolean columns. This parameter is mainly for\n pandas compatibility.\n\n Returns\n -------\n max : scalar for a Series, and a Series for a DataFrame.\n\n See Also\n --------\n DataFrame.shape: Number of DataFrame rows and columns (including NA\n elements).\n DataFrame.isna: Boolean same-sized DataFrame showing places of NA\n elements.\n\n Examples\n --------\n Constructing DataFrame from a dictionary:\n\n >>> df = ps.DataFrame({"Person":\n ... ["John", "Myla", "Lewis", "John", "Myla"],\n ... "Age": [24., np.nan, 21., 33, 26],\n ... "Single": [False, True, True, True, False]},\n ... columns=["Person", "Age", "Single"])\n >>> df\n Person Age Single\n 0 John 24.0 False\n 1 Myla NaN True\n 2 Lewis 21.0 True\n 3 John 33.0 True\n 4 Myla 26.0 False\n\n Notice the uncounted NA values:\n\n >>> df.count()\n Person 5\n Age 4\n Single 5\n dtype: int64\n\n >>> df.count(axis=1)\n 0 3\n 1 2\n 2 3\n 3 3\n 4 3\n dtype: int64\n\n On a Series:\n\n >>> df[\'Person\'].count()\n 5\n\n >>> df[\'Age\'].count()\n 4\n '
return self._reduce_for_stat_function(Frame._count_expr, name='count', axis=axis, numeric_only=numeric_only) |
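Frame._count_expr (not reproduced here) is essentially a null-skipping count; in plain Spark terms that is F.count, which is where the Age count of 4 above comes from. A sketch assuming a local SparkSession; the rows mirror the docstring.

from pyspark.sql import SparkSession, functions as F

spark = SparkSession.builder.master("local[1]").getOrCreate()
sdf = spark.createDataFrame(
    [("John", 24.0), ("Myla", None), ("Lewis", 21.0), ("John", 33.0), ("Myla", 26.0)],
    ["Person", "Age"],
)

sdf.select(F.count("Person").alias("Person"), F.count("Age").alias("Age")).show()
# Person = 5, Age = 4 -- nulls are not counted.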
def std(self, axis: Optional[Axis]=None, ddof: int=1, numeric_only: bool=None) -> Union[(Scalar, 'Series')]:
"\n Return sample standard deviation.\n\n Parameters\n ----------\n axis : {index (0), columns (1)}\n Axis for the function to be applied on.\n ddof : int, default 1\n Delta Degrees of Freedom. The divisor used in calculations is N - ddof,\n where N represents the number of elements.\n numeric_only : bool, default None\n Include only float, int, boolean columns. False is not supported. This parameter\n is mainly for pandas compatibility.\n\n Returns\n -------\n std : scalar for a Series, and a Series for a DataFrame.\n\n Examples\n --------\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},\n ... columns=['a', 'b'])\n\n On a DataFrame:\n\n >>> df.std()\n a 1.0\n b 0.1\n dtype: float64\n\n >>> df.std(axis=1)\n 0 0.636396\n 1 1.272792\n 2 1.909188\n 3 NaN\n dtype: float64\n\n >>> df.std(ddof=0)\n a 0.816497\n b 0.081650\n dtype: float64\n\n On a Series:\n\n >>> df['a'].std()\n 1.0\n\n >>> df['a'].std(ddof=0)\n 0.816496580927726\n "
assert (ddof in (0, 1))
axis = validate_axis(axis)
if ((numeric_only is None) and (axis == 0)):
numeric_only = True
def std(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif (not isinstance(spark_type, NumericType)):
raise TypeError('Could not convert {} ({}) to numeric'.format(spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()))
if (ddof == 0):
return F.stddev_pop(spark_column)
else:
return F.stddev_samp(spark_column)
return self._reduce_for_stat_function(std, name='std', axis=axis, numeric_only=numeric_only, ddof=ddof) | 8,972,190,425,281,151,000 | Return sample standard deviation.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
std : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.std()
a 1.0
b 0.1
dtype: float64
>>> df.std(axis=1)
0 0.636396
1 1.272792
2 1.909188
3 NaN
dtype: float64
>>> df.std(ddof=0)
a 0.816497
b 0.081650
dtype: float64
On a Series:
>>> df['a'].std()
1.0
>>> df['a'].std(ddof=0)
0.816496580927726 | python/pyspark/pandas/generic.py | std | XpressAI/spark | python | def std(self, axis: Optional[Axis]=None, ddof: int=1, numeric_only: bool=None) -> Union[(Scalar, 'Series')]:
"\n Return sample standard deviation.\n\n Parameters\n ----------\n axis : {index (0), columns (1)}\n Axis for the function to be applied on.\n ddof : int, default 1\n Delta Degrees of Freedom. The divisor used in calculations is N - ddof,\n where N represents the number of elements.\n numeric_only : bool, default None\n Include only float, int, boolean columns. False is not supported. This parameter\n is mainly for pandas compatibility.\n\n Returns\n -------\n std : scalar for a Series, and a Series for a DataFrame.\n\n Examples\n --------\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},\n ... columns=['a', 'b'])\n\n On a DataFrame:\n\n >>> df.std()\n a 1.0\n b 0.1\n dtype: float64\n\n >>> df.std(axis=1)\n 0 0.636396\n 1 1.272792\n 2 1.909188\n 3 NaN\n dtype: float64\n\n >>> df.std(ddof=0)\n a 0.816497\n b 0.081650\n dtype: float64\n\n On a Series:\n\n >>> df['a'].std()\n 1.0\n\n >>> df['a'].std(ddof=0)\n 0.816496580927726\n "
assert (ddof in (0, 1))
axis = validate_axis(axis)
if ((numeric_only is None) and (axis == 0)):
numeric_only = True
def std(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif (not isinstance(spark_type, NumericType)):
raise TypeError('Could not convert {} ({}) to numeric'.format(spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()))
if (ddof == 0):
return F.stddev_pop(spark_column)
else:
return F.stddev_samp(spark_column)
return self._reduce_for_stat_function(std, name='std', axis=axis, numeric_only=numeric_only, ddof=ddof) |
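ddof only selects between F.stddev_samp (divide by N-1) and F.stddev_pop (divide by N); the two differ by a factor of sqrt((N-1)/N). A numpy-only check of the docstring's numbers for column a = [1, 2, 3]:

import numpy as np

a = np.array([1.0, 2.0, 3.0])
print(np.std(a, ddof=1))   # 1.0               <- stddev_samp (ddof=1)
print(np.std(a, ddof=0))   # 0.816496580927726 <- stddev_pop  (ddof=0)
print(np.std(a, ddof=1) * np.sqrt((len(a) - 1) / len(a)))  # equals the ddof=0 value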
def var(self, axis: Optional[Axis]=None, ddof: int=1, numeric_only: bool=None) -> Union[(Scalar, 'Series')]:
"\n Return unbiased variance.\n\n Parameters\n ----------\n axis : {index (0), columns (1)}\n Axis for the function to be applied on.\n ddof : int, default 1\n Delta Degrees of Freedom. The divisor used in calculations is N - ddof,\n where N represents the number of elements.\n numeric_only : bool, default None\n Include only float, int, boolean columns. False is not supported. This parameter\n is mainly for pandas compatibility.\n\n Returns\n -------\n var : scalar for a Series, and a Series for a DataFrame.\n\n Examples\n --------\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},\n ... columns=['a', 'b'])\n\n On a DataFrame:\n\n >>> df.var()\n a 1.00\n b 0.01\n dtype: float64\n\n >>> df.var(axis=1)\n 0 0.405\n 1 1.620\n 2 3.645\n 3 NaN\n dtype: float64\n\n >>> df.var(ddof=0)\n a 0.666667\n b 0.006667\n dtype: float64\n\n On a Series:\n\n >>> df['a'].var()\n 1.0\n\n >>> df['a'].var(ddof=0)\n 0.6666666666666666\n "
assert (ddof in (0, 1))
axis = validate_axis(axis)
if ((numeric_only is None) and (axis == 0)):
numeric_only = True
def var(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif (not isinstance(spark_type, NumericType)):
raise TypeError('Could not convert {} ({}) to numeric'.format(spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()))
if (ddof == 0):
return F.var_pop(spark_column)
else:
return F.var_samp(spark_column)
return self._reduce_for_stat_function(var, name='var', axis=axis, numeric_only=numeric_only, ddof=ddof) | -3,360,667,890,068,724,000 | Return unbiased variance.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
var : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.var()
a 1.00
b 0.01
dtype: float64
>>> df.var(axis=1)
0 0.405
1 1.620
2 3.645
3 NaN
dtype: float64
>>> df.var(ddof=0)
a 0.666667
b 0.006667
dtype: float64
On a Series:
>>> df['a'].var()
1.0
>>> df['a'].var(ddof=0)
0.6666666666666666 | python/pyspark/pandas/generic.py | var | XpressAI/spark | python | def var(self, axis: Optional[Axis]=None, ddof: int=1, numeric_only: bool=None) -> Union[(Scalar, 'Series')]:
"\n Return unbiased variance.\n\n Parameters\n ----------\n axis : {index (0), columns (1)}\n Axis for the function to be applied on.\n ddof : int, default 1\n Delta Degrees of Freedom. The divisor used in calculations is N - ddof,\n where N represents the number of elements.\n numeric_only : bool, default None\n Include only float, int, boolean columns. False is not supported. This parameter\n is mainly for pandas compatibility.\n\n Returns\n -------\n var : scalar for a Series, and a Series for a DataFrame.\n\n Examples\n --------\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},\n ... columns=['a', 'b'])\n\n On a DataFrame:\n\n >>> df.var()\n a 1.00\n b 0.01\n dtype: float64\n\n >>> df.var(axis=1)\n 0 0.405\n 1 1.620\n 2 3.645\n 3 NaN\n dtype: float64\n\n >>> df.var(ddof=0)\n a 0.666667\n b 0.006667\n dtype: float64\n\n On a Series:\n\n >>> df['a'].var()\n 1.0\n\n >>> df['a'].var(ddof=0)\n 0.6666666666666666\n "
assert (ddof in (0, 1))
axis = validate_axis(axis)
if ((numeric_only is None) and (axis == 0)):
numeric_only = True
def var(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif (not isinstance(spark_type, NumericType)):
raise TypeError('Could not convert {} ({}) to numeric'.format(spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()))
if (ddof == 0):
return F.var_pop(spark_column)
else:
return F.var_samp(spark_column)
return self._reduce_for_stat_function(var, name='var', axis=axis, numeric_only=numeric_only, ddof=ddof) |
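The same split applies to variance: F.var_samp for ddof=1 and F.var_pop for ddof=0. A numpy-only check of the docstring's numbers:

import numpy as np

a = np.array([1.0, 2.0, 3.0])
print(np.var(a, ddof=1))   # 1.0                <- var_samp (ddof=1)
print(np.var(a, ddof=0))   # 0.6666666666666666 <- var_pop  (ddof=0)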