body | body_hash | docstring | path | name | repository_name | lang | body_without_docstring
---|---|---|---|---|---|---|---|
@attr(tags=['advanced', 'eip', 'advancedns', 'basic', 'sg'])
def test_deploy_vm_password_enabled(self):
'Test Deploy Virtual Machine with startVM=false & enabledpassword in\n template\n '
self.debug(('Deploying instance in the account: %s' % self.account.name))
self.virtual_machine = VirtualMachine.create(self.apiclient, self.services['virtual_machine'], accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, templateid=self.template.id, startvm=False)
self.debug(('Deployed instance in account: %s' % self.account.name))
list_vm_response = list_virtual_machines(self.apiclient, id=self.virtual_machine.id)
self.debug(('Verify listVirtualMachines response for virtual machine: %s' % self.virtual_machine.id))
self.assertEqual(isinstance(list_vm_response, list), True, 'Check list response returns a valid list')
vm_response = list_vm_response[0]
self.assertEqual(vm_response.state, 'Stopped', 'VM should be in stopped state after deployment')
self.debug(('Starting the instance: %s' % self.virtual_machine.name))
self.virtual_machine.start(self.apiclient)
self.debug(('Started the instance: %s' % self.virtual_machine.name))
list_vm_response = list_virtual_machines(self.apiclient, id=self.virtual_machine.id)
self.debug(('Verify listVirtualMachines response for virtual machine: %s' % self.virtual_machine.id))
self.assertEqual(isinstance(list_vm_response, list), True, 'Check list response returns a valid list')
vm_response = list_vm_response[0]
self.assertEqual(vm_response.state, 'Running', 'VM should be in running state after being started')
return | 5,462,064,970,451,302,000 | Test Deploy Virtual Machine with startVM=false & enabledpassword in
template | test/integration/component/test_stopped_vm.py | test_deploy_vm_password_enabled | ksowmya/cloudstack-1 | python | @attr(tags=['advanced', 'eip', 'advancedns', 'basic', 'sg'])
def test_deploy_vm_password_enabled(self):
'Test Deploy Virtual Machine with startVM=false & enabledpassword in\n template\n '
self.debug(('Deploying instance in the account: %s' % self.account.name))
self.virtual_machine = VirtualMachine.create(self.apiclient, self.services['virtual_machine'], accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, templateid=self.template.id, startvm=False)
self.debug(('Deployed instance in account: %s' % self.account.name))
list_vm_response = list_virtual_machines(self.apiclient, id=self.virtual_machine.id)
self.debug(('Verify listVirtualMachines response for virtual machine: %s' % self.virtual_machine.id))
self.assertEqual(isinstance(list_vm_response, list), True, 'Check list response returns a valid list')
vm_response = list_vm_response[0]
self.assertEqual(vm_response.state, 'Stopped', 'VM should be in stopped state after deployment')
self.debug(('Starting the instance: %s' % self.virtual_machine.name))
self.virtual_machine.start(self.apiclient)
self.debug(('Started the instance: %s' % self.virtual_machine.name))
list_vm_response = list_virtual_machines(self.apiclient, id=self.virtual_machine.id)
self.debug(('Verify listVirtualMachines response for virtual machine: %s' % self.virtual_machine.id))
self.assertEqual(isinstance(list_vm_response, list), True, 'Check list response returns a valid list')
vm_response = list_vm_response[0]
self.assertEqual(vm_response.state, 'Running', 'VM should be in running state after being started')
return |
@attr(tags=['advanced', 'eip', 'advancedns', 'basic', 'sg'])
def test_vm_per_account(self):
'Test VM limit per account\n '
self.debug(('Updating instance resource limit for account: %s' % self.account.name))
update_resource_limit(self.apiclient, 0, account=self.account.name, domainid=self.account.domainid, max=1)
self.debug(('Deploying VM instance in account: %s' % self.account.name))
virtual_machine = VirtualMachine.create(self.apiclient, self.services['virtual_machine'], templateid=self.template.id, accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, startvm=False)
self.assertEqual(virtual_machine.state, 'Stopped', 'Check VM state is Stopped or not')
with self.assertRaises(Exception):
VirtualMachine.create(self.apiclient, self.services['virtual_machine'], templateid=self.template.id, accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, startvm=False)
return | -7,472,055,634,946,024,000 | Test VM limit per account | test/integration/component/test_stopped_vm.py | test_vm_per_account | ksowmya/cloudstack-1 | python | @attr(tags=['advanced', 'eip', 'advancedns', 'basic', 'sg'])
def test_vm_per_account(self):
'\n '
self.debug(('Updating instance resource limit for account: %s' % self.account.name))
update_resource_limit(self.apiclient, 0, account=self.account.name, domainid=self.account.domainid, max=1)
self.debug(('Deploying VM instance in account: %s' % self.account.name))
virtual_machine = VirtualMachine.create(self.apiclient, self.services['virtual_machine'], templateid=self.template.id, accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, startvm=False)
self.assertEqual(virtual_machine.state, 'Stopped', 'Check VM state is Stopped or not')
with self.assertRaises(Exception):
VirtualMachine.create(self.apiclient, self.services['virtual_machine'], templateid=self.template.id, accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, startvm=False)
return |
@attr(tags=['advanced', 'eip', 'advancedns', 'basic', 'sg'])
def test_upload_attach_volume(self):
'Test Upload volume and attach to VM in stopped state\n '
self.debug(('Uploading the volume: %s' % self.services['volume']['diskname']))
try:
volume = Volume.upload(self.apiclient, self.services['volume'], zoneid=self.zone.id, account=self.account.name, domainid=self.account.domainid)
self.debug(('Uploading the volume: %s' % volume.name))
volume.wait_for_upload(self.apiclient)
self.debug(('Volume: %s uploaded successfully' % volume.name))
except Exception as e:
self.fail(('Failed to upload the volume: %s' % e))
self.debug(('Deploying VM instance in account: %s' % self.account.name))
virtual_machine = VirtualMachine.create(self.apiclient, self.services['virtual_machine'], templateid=self.template.id, accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, startvm=False)
self.assertEqual(virtual_machine.state, 'Stopped', 'Check VM state is Stopped or not')
virtual_machine.attach_volume(self.apiclient, volume)
return | 5,862,266,767,459,204,000 | Test Upload volume and attach to VM in stopped state | test/integration/component/test_stopped_vm.py | test_upload_attach_volume | ksowmya/cloudstack-1 | python | @attr(tags=['advanced', 'eip', 'advancedns', 'basic', 'sg'])
def test_upload_attach_volume(self):
'\n '
self.debug(('Uploading the volume: %s' % self.services['volume']['diskname']))
try:
volume = Volume.upload(self.apiclient, self.services['volume'], zoneid=self.zone.id, account=self.account.name, domainid=self.account.domainid)
self.debug(('Uploading the volume: %s' % volume.name))
volume.wait_for_upload(self.apiclient)
self.debug(('Volume: %s uploaded successfully' % volume.name))
except Exception as e:
self.fail(('Failed to upload the volume: %s' % e))
self.debug(('Deploying VM instance in account: %s' % self.account.name))
virtual_machine = VirtualMachine.create(self.apiclient, self.services['virtual_machine'], templateid=self.template.id, accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, startvm=False)
self.assertEqual(virtual_machine.state, 'Stopped', 'Check VM state is Stopped or not')
virtual_machine.attach_volume(self.apiclient, volume)
return |
@attr(tags=['advanced', 'advancedns', 'simulator', 'api', 'basic', 'eip', 'sg'])
def test_deployVmOnGivenHost(self):
'Test deploy VM on specific host\n '
hosts = Host.list(self.apiclient, zoneid=self.zone.id, type='Routing', state='Up', listall=True)
self.assertEqual(isinstance(hosts, list), True, 'CS should have at least one host Up and Running')
host = hosts[0]
self.debug(('Deploying VM on host: %s' % host.name))
try:
vm = VirtualMachine.create(self.apiclient, self.services['virtual_machine'], templateid=self.template.id, accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, hostid=host.id)
self.debug('Deploy VM succeeded')
except Exception as e:
self.fail(('Deploy VM failed with exception: %s' % e))
self.debug('Checking the state of deployed VM')
vms = VirtualMachine.list(self.apiclient, id=vm.id, listall=True, account=self.account.name, domainid=self.account.domainid)
self.assertEqual(isinstance(vms, list), True, 'List Vm should return a valid response')
vm_response = vms[0]
self.assertEqual(vm_response.state, 'Running', 'VM should be in running state after deployment')
self.assertEqual(vm_response.hostid, host.id, 'Host id where VM is deployed should match')
return | 3,533,336,537,064,077,000 | Test deploy VM on specific host | test/integration/component/test_stopped_vm.py | test_deployVmOnGivenHost | ksowmya/cloudstack-1 | python | @attr(tags=['advanced', 'advancedns', 'simulator', 'api', 'basic', 'eip', 'sg'])
def test_deployVmOnGivenHost(self):
'\n '
hosts = Host.list(self.apiclient, zoneid=self.zone.id, type='Routing', state='Up', listall=True)
self.assertEqual(isinstance(hosts, list), True, 'CS should have at least one host Up and Running')
host = hosts[0]
self.debug(('Deploying VM on host: %s' % host.name))
try:
vm = VirtualMachine.create(self.apiclient, self.services['virtual_machine'], templateid=self.template.id, accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, hostid=host.id)
self.debug('Deploy VM succeeded')
except Exception as e:
self.fail(('Deploy VM failed with exception: %s' % e))
self.debug('Checking the state of deployed VM')
vms = VirtualMachine.list(self.apiclient, id=vm.id, listall=True, account=self.account.name, domainid=self.account.domainid)
self.assertEqual(isinstance(vms, list), True, 'List Vm should return a valid response')
vm_response = vms[0]
self.assertEqual(vm_response.state, 'Running', 'VM should be in running state after deployment')
self.assertEqual(vm_response.hostid, host.id, 'Host id where VM is deployed should match')
return |
@staticmethod
def _get_workload_data(data: dict) -> dict:
'Return data for requested Workload.'
optimization_id = RequestDataProcessor.get_string_value(data, 'id')
optimization_data = OptimizationAPIInterface.get_optimization_details({'id': optimization_id})
return optimization_data | 1,312,375,586,110,402,800 | Return data for requested Workload. | neural_compressor/ux/web/service/optimization.py | _get_workload_data | intel/lp-opt-tool | python | @staticmethod
def _get_workload_data(data: dict) -> dict:
optimization_id = RequestDataProcessor.get_string_value(data, 'id')
optimization_data = OptimizationAPIInterface.get_optimization_details({'id': optimization_id})
return optimization_data |
def __init__(self, node) -> None:
' :param node: pyrlang.node.Node\n '
GenServer.__init__(self, node_name=node.node_name_, accepted_calls=['is_auth'])
node.register_name(self, Atom('net_kernel')) | 5,099,605,340,179,313,000 | :param node: pyrlang.node.Node | pyrlang/net_kernel.py | __init__ | AlexKovalevych/Pyrlang | python | def __init__(self, node) -> None:
' \n '
GenServer.__init__(self, node_name=node.node_name_, accepted_calls=['is_auth'])
node.register_name(self, Atom('net_kernel')) |
def sort_rings(index_rings: List[List[Tuple[(int, int)]]], vertices: npt.NDArray[np.float32]) -> SortedRingType:
'Sorts a list of index-rings.\n\n Takes a list of unsorted index rings and sorts them into\n "exterior" and "interior" components. Any doubly-nested rings\n are considered exterior rings.\n\n Parameters\n ----------\n index_rings : List[List[Tuple[int, int]]]\n Unosorted list of list of mesh edges as specified by end node\n indexs of each edge.\n vertices : npt.NDArray[np.float32]\n 2D ``n x 2`` array of node coordinate couples.\n\n Returns\n -------\n SortedRingType\n Dictionary of information aboout polygon boundaries extracted\n based on the input\n\n Notes\n -----\n The return value is a mapping of ring index to dictionary\n containing exterior and interior linear ring information as\n numpy array\n This function is not currently used, instead a different faster\n approach is used for boundary and polygon calculation from\n elements.\n '
areas = []
for index_ring in index_rings:
(e0, e1) = [list(t) for t in zip(*index_ring)]
areas.append(float(Polygon(vertices[e0, :]).area))
idx = areas.index(np.max(areas))
exterior = index_rings.pop(idx)
areas.pop(idx)
_id = 0
_index_rings = {}
_index_rings[_id] = {'exterior': np.asarray(exterior), 'interiors': []}
(e0, e1) = [list(t) for t in zip(*exterior)]
path = Path(vertices[(e0 + [e0[0]]), :], closed=True)
while (len(index_rings) > 0):
potential_interiors = []
for (i, index_ring) in enumerate(index_rings):
(e0, e1) = [list(t) for t in zip(*index_ring)]
if path.contains_point(vertices[e0[0], :]):
potential_interiors.append(i)
real_interiors = []
for (i, p_interior) in reversed(list(enumerate(potential_interiors))):
_p_interior = index_rings[p_interior]
check = [index_rings[k] for (j, k) in reversed(list(enumerate(potential_interiors))) if (i != j)]
has_parent = False
for _path in check:
(e0, e1) = [list(t) for t in zip(*_path)]
_path = Path(vertices[(e0 + [e0[0]]), :], closed=True)
if _path.contains_point(vertices[_p_interior[0][0], :]):
has_parent = True
if (not has_parent):
real_interiors.append(p_interior)
for i in reversed(sorted(real_interiors)):
_index_rings[_id]['interiors'].append(np.asarray(index_rings.pop(i)))
areas.pop(i)
if (len(index_rings) > 0):
idx = areas.index(np.max(areas))
exterior = index_rings.pop(idx)
areas.pop(idx)
_id += 1
_index_rings[_id] = {'exterior': np.asarray(exterior), 'interiors': []}
(e0, e1) = [list(t) for t in zip(*exterior)]
path = Path(vertices[(e0 + [e0[0]]), :], closed=True)
return _index_rings | -7,161,474,044,817,911,000 | Sorts a list of index-rings.
Takes a list of unsorted index rings and sorts them into
"exterior" and "interior" components. Any doubly-nested rings
are considered exterior rings.
Parameters
----------
index_rings : List[List[Tuple[int, int]]]
Unsorted list of lists of mesh edges as specified by end node
indices of each edge.
vertices : npt.NDArray[np.float32]
2D ``n x 2`` array of node coordinate couples.
Returns
-------
SortedRingType
Dictionary of information about polygon boundaries extracted
based on the input
Notes
-----
The return value is a mapping of ring index to dictionary
containing exterior and interior linear ring information as
numpy array
This function is not currently used, instead a different faster
approach is used for boundary and polygon calculation from
elements. | ocsmesh/mesh/mesh.py | sort_rings | noaa-ocs-modeling/OCSMesh | python | def sort_rings(index_rings: List[List[Tuple[(int, int)]]], vertices: npt.NDArray[np.float32]) -> SortedRingType:
'Sorts a list of index-rings.\n\n Takes a list of unsorted index rings and sorts them into\n "exterior" and "interior" components. Any doubly-nested rings\n are considered exterior rings.\n\n Parameters\n ----------\n index_rings : List[List[Tuple[int, int]]]\n Unosorted list of list of mesh edges as specified by end node\n indexs of each edge.\n vertices : npt.NDArray[np.float32]\n 2D ``n x 2`` array of node coordinate couples.\n\n Returns\n -------\n SortedRingType\n Dictionary of information aboout polygon boundaries extracted\n based on the input\n\n Notes\n -----\n The return value is a mapping of ring index to dictionary\n containing exterior and interior linear ring information as\n numpy array\n This function is not currently used, instead a different faster\n approach is used for boundary and polygon calculation from\n elements.\n '
areas = []
for index_ring in index_rings:
(e0, e1) = [list(t) for t in zip(*index_ring)]
areas.append(float(Polygon(vertices[e0, :]).area))
idx = areas.index(np.max(areas))
exterior = index_rings.pop(idx)
areas.pop(idx)
_id = 0
_index_rings = {}
_index_rings[_id] = {'exterior': np.asarray(exterior), 'interiors': []}
(e0, e1) = [list(t) for t in zip(*exterior)]
path = Path(vertices[(e0 + [e0[0]]), :], closed=True)
while (len(index_rings) > 0):
potential_interiors = []
for (i, index_ring) in enumerate(index_rings):
(e0, e1) = [list(t) for t in zip(*index_ring)]
if path.contains_point(vertices[e0[0], :]):
potential_interiors.append(i)
real_interiors = []
for (i, p_interior) in reversed(list(enumerate(potential_interiors))):
_p_interior = index_rings[p_interior]
check = [index_rings[k] for (j, k) in reversed(list(enumerate(potential_interiors))) if (i != j)]
has_parent = False
for _path in check:
(e0, e1) = [list(t) for t in zip(*_path)]
_path = Path(vertices[(e0 + [e0[0]]), :], closed=True)
if _path.contains_point(vertices[_p_interior[0][0], :]):
has_parent = True
if (not has_parent):
real_interiors.append(p_interior)
for i in reversed(sorted(real_interiors)):
_index_rings[_id]['interiors'].append(np.asarray(index_rings.pop(i)))
areas.pop(i)
if (len(index_rings) > 0):
idx = areas.index(np.max(areas))
exterior = index_rings.pop(idx)
areas.pop(idx)
_id += 1
_index_rings[_id] = {'exterior': np.asarray(exterior), 'interiors': []}
(e0, e1) = [list(t) for t in zip(*exterior)]
path = Path(vertices[(e0 + [e0[0]]), :], closed=True)
return _index_rings |
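A minimal sketch of how `sort_rings` could be called, assuming it is importable from `ocsmesh.mesh.mesh` (the path column above); the unit-square ring below is made up purely for illustration:

```python
import numpy as np
from ocsmesh.mesh.mesh import sort_rings  # module-level helper, per the path column above

# Four corners of a unit square and a single closed ring of edges over them.
vertices = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]], dtype=np.float32)
index_rings = [[(0, 1), (1, 2), (2, 3), (3, 0)]]

rings = sort_rings(index_rings, vertices)
# With a single ring the result has one entry: the ring becomes the exterior, with no interiors.
print(rings[0]['exterior'])
print(rings[0]['interiors'])
```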
def _mesh_interpolate_worker(coords: npt.NDArray[np.float32], coords_crs: CRS, raster_path: Union[(str, Path)], chunk_size: Optional[int], method: Literal[('spline', 'linear', 'nearest')]='spline', filter_by_shape: bool=False):
"Interpolator worker function to be used in parallel calls\n\n Parameters\n ----------\n coords : npt.NDArray[np.float32]\n Mesh node coordinates.\n coords_crs : CRS\n Coordinate reference system of the input mesh coordinates.\n raster_path : str or Path\n Path to the raster temporary working file.\n chunk_size : int or None\n Chunk size for windowing over the raster.\n method : {'spline', 'linear', 'nearest'}, default='spline'\n Method of interpolation.\n filter_by_shape : bool\n Flag for node filtering based on raster bbox or shape\n\n Returns\n -------\n idxs : npt.NDArray[bool]\n Mask of the nodes whose values are updated by current\n interpolation\n values : npt.NDArray[np.float32]\n Interpolated values.\n\n Raises\n ------\n ValueError\n If specified interpolation `method` is not supported.\n "
coords = np.array(coords)
raster = Raster(raster_path)
idxs = []
values = []
for window in raster.iter_windows(chunk_size=chunk_size, overlap=2):
if (not raster.crs.equals(coords_crs)):
transformer = Transformer.from_crs(coords_crs, raster.crs, always_xy=True)
(coords[:, 0], coords[:, 1]) = transformer.transform(coords[:, 0], coords[:, 1])
xi = raster.get_x(window)
yi = raster.get_y(window)
zi = raster.get_values(window=window, masked=True)
if (not filter_by_shape):
_idxs = np.logical_and(np.logical_and((np.min(xi) <= coords[:, 0]), (np.max(xi) >= coords[:, 0])), np.logical_and((np.min(yi) <= coords[:, 1]), (np.max(yi) >= coords[:, 1])))
else:
shape = raster.get_multipolygon()
gs_pt = gpd.points_from_xy(coords[:, 0], coords[:, 1])
_idxs = gs_pt.intersects(shape)
interp_mask = None
if (method == 'spline'):
f = RectBivariateSpline(xi, np.flip(yi), np.flipud(zi).T, kx=3, ky=3, s=0)
_values = f.ev(coords[(_idxs, 0)], coords[(_idxs, 1)])
elif (method in ['nearest', 'linear']):
if np.any(zi.mask):
m_interp = RegularGridInterpolator((xi, np.flip(yi)), np.flipud(zi.mask).T.astype(bool), method=method)
interp_mask = (m_interp(coords[_idxs]) > 0)
f = RegularGridInterpolator((xi, np.flip(yi)), np.flipud(zi).T, method=method)
_values = f(coords[_idxs])
else:
raise ValueError(f'Invalid value method specified <{method}>!')
if (interp_mask is not None):
helper = np.ones_like(_values).astype(bool)
helper[interp_mask] = False
_idxs[_idxs] = helper
_values = _values[(~ interp_mask)]
idxs.append(_idxs)
values.append(_values)
return (np.hstack(idxs), np.hstack(values)) | -2,363,268,281,550,425,600 | Interpolator worker function to be used in parallel calls
Parameters
----------
coords : npt.NDArray[np.float32]
Mesh node coordinates.
coords_crs : CRS
Coordinate reference system of the input mesh coordinates.
raster_path : str or Path
Path to the raster temporary working file.
chunk_size : int or None
Chunk size for windowing over the raster.
method : {'spline', 'linear', 'nearest'}, default='spline'
Method of interpolation.
filter_by_shape : bool
Flag for node filtering based on raster bbox or shape
Returns
-------
idxs : npt.NDArray[bool]
Mask of the nodes whose values are updated by current
interpolation
values : npt.NDArray[np.float32]
Interpolated values.
Raises
------
ValueError
If specified interpolation `method` is not supported. | ocsmesh/mesh/mesh.py | _mesh_interpolate_worker | noaa-ocs-modeling/OCSMesh | python | def _mesh_interpolate_worker(coords: npt.NDArray[np.float32], coords_crs: CRS, raster_path: Union[(str, Path)], chunk_size: Optional[int], method: Literal[('spline', 'linear', 'nearest')]='spline', filter_by_shape: bool=False):
"Interpolator worker function to be used in parallel calls\n\n Parameters\n ----------\n coords : npt.NDArray[np.float32]\n Mesh node coordinates.\n coords_crs : CRS\n Coordinate reference system of the input mesh coordinates.\n raster_path : str or Path\n Path to the raster temporary working file.\n chunk_size : int or None\n Chunk size for windowing over the raster.\n method : {'spline', 'linear', 'nearest'}, default='spline'\n Method of interpolation.\n filter_by_shape : bool\n Flag for node filtering based on raster bbox or shape\n\n Returns\n -------\n idxs : npt.NDArray[bool]\n Mask of the nodes whose values are updated by current\n interpolation\n values : npt.NDArray[np.float32]\n Interpolated values.\n\n Raises\n ------\n ValueError\n If specified interpolation `method` is not supported.\n "
coords = np.array(coords)
raster = Raster(raster_path)
idxs = []
values = []
for window in raster.iter_windows(chunk_size=chunk_size, overlap=2):
if (not raster.crs.equals(coords_crs)):
transformer = Transformer.from_crs(coords_crs, raster.crs, always_xy=True)
(coords[:, 0], coords[:, 1]) = transformer.transform(coords[:, 0], coords[:, 1])
xi = raster.get_x(window)
yi = raster.get_y(window)
zi = raster.get_values(window=window, masked=True)
if (not filter_by_shape):
_idxs = np.logical_and(np.logical_and((np.min(xi) <= coords[:, 0]), (np.max(xi) >= coords[:, 0])), np.logical_and((np.min(yi) <= coords[:, 1]), (np.max(yi) >= coords[:, 1])))
else:
shape = raster.get_multipolygon()
gs_pt = gpd.points_from_xy(coords[:, 0], coords[:, 1])
_idxs = gs_pt.intersects(shape)
interp_mask = None
if (method == 'spline'):
f = RectBivariateSpline(xi, np.flip(yi), np.flipud(zi).T, kx=3, ky=3, s=0)
_values = f.ev(coords[(_idxs, 0)], coords[(_idxs, 1)])
elif (method in ['nearest', 'linear']):
if np.any(zi.mask):
m_interp = RegularGridInterpolator((xi, np.flip(yi)), np.flipud(zi.mask).T.astype(bool), method=method)
interp_mask = (m_interp(coords[_idxs]) > 0)
f = RegularGridInterpolator((xi, np.flip(yi)), np.flipud(zi).T, method=method)
_values = f(coords[_idxs])
else:
raise ValueError(f'Invalid value method specified <{method}>!')
if (interp_mask is not None):
helper = np.ones_like(_values).astype(bool)
helper[interp_mask] = False
_idxs[_idxs] = helper
_values = _values[(~ interp_mask)]
idxs.append(_idxs)
values.append(_values)
return (np.hstack(idxs), np.hstack(values)) |
def __init__(self, mesh: jigsaw_msh_t) -> None:
"Initialize Euclidean mesh object.\n\n Parameters\n ----------\n mesh : jigsaw_msh_t\n The underlying jigsaw_msh_t object to hold onto mesh data.\n\n Raises\n ------\n TypeError\n If input mesh is not of `jigsaw_msh_t` type.\n ValueError\n If input mesh's `mshID` is not equal to ``euclidean-mesh``.\n If input mesh has `crs` property which is not of `CRS` type.\n "
if (not isinstance(mesh, jigsaw_msh_t)):
raise TypeError(f'Argument mesh must be of type {jigsaw_msh_t}, not type {type(mesh)}.')
if (mesh.mshID != 'euclidean-mesh'):
raise ValueError(f"Argument mesh has property mshID={mesh.mshID}, but expected 'euclidean-mesh'.")
if (not hasattr(mesh, 'crs')):
warnings.warn('Input mesh has no CRS information.')
mesh.crs = None
elif (not isinstance(mesh.crs, CRS)):
raise ValueError(f'crs property must be of type {CRS}, not type {type(mesh.crs)}.')
self._hull = None
self._nodes = None
self._elements = None
self._msh_t = mesh | 8,873,620,727,186,494,000 | Initialize Euclidean mesh object.
Parameters
----------
mesh : jigsaw_msh_t
The underlying jigsaw_msh_t object to hold onto mesh data.
Raises
------
TypeError
If input mesh is not of `jigsaw_msh_t` type.
ValueError
If input mesh's `mshID` is not equal to ``euclidean-mesh``.
If input mesh has `crs` property which is not of `CRS` type. | ocsmesh/mesh/mesh.py | __init__ | noaa-ocs-modeling/OCSMesh | python | def __init__(self, mesh: jigsaw_msh_t) -> None:
"Initialize Euclidean mesh object.\n\n Parameters\n ----------\n mesh : jigsaw_msh_t\n The underlying jigsaw_msh_t object to hold onto mesh data.\n\n Raises\n ------\n TypeError\n If input mesh is not of `jigsaw_msh_t` type.\n ValueError\n If input mesh's `mshID` is not equal to ``euclidean-mesh``.\n If input mesh has `crs` property which is not of `CRS` type.\n "
if (not isinstance(mesh, jigsaw_msh_t)):
raise TypeError(f'Argument mesh must be of type {jigsaw_msh_t}, not type {type(mesh)}.')
if (mesh.mshID != 'euclidean-mesh'):
raise ValueError(f"Argument mesh has property mshID={mesh.mshID}, but expected 'euclidean-mesh'.")
if (not hasattr(mesh, 'crs')):
warnings.warn('Input mesh has no CRS information.')
mesh.crs = None
elif (not isinstance(mesh.crs, CRS)):
raise ValueError(f'crs property must be of type {CRS}, not type {type(mesh.crs)}.')
self._hull = None
self._nodes = None
self._elements = None
self._msh_t = mesh |
def write(self, path: Union[(str, os.PathLike)], overwrite: bool=False, format: Literal[('grd', '2dm', 'msh', 'vtk')]='grd') -> None:
"Export the mesh object to the disk\n\n Parameters\n ----------\n path : path-like\n Path to which the mesh should be exported.\n overwrite : bool, default=False\n Whether to overwrite, if a file already exists in `path`\n format : { 'grd', '2dm', 'msh', 'vtk' }\n Format of the export, SMS-2DM or GRD.\n\n Returns\n -------\n None\n\n Raises\n ------\n ValueError\n If specified export format is **not** supported.\n "
path = pathlib.Path(path)
if (path.exists() and (overwrite is not True)):
raise IOError(f'File {str(path)} exists and overwrite is not True.')
if (format == 'grd'):
grd_dict = utils.msh_t_to_grd(self.msh_t)
if (self._boundaries and self._boundaries.data):
grd_dict.update(boundaries=self._boundaries.data)
grd.write(grd_dict, path, overwrite)
elif (format == '2dm'):
sms2dm.writer(utils.msh_t_to_2dm(self.msh_t), path, overwrite)
elif (format == 'msh'):
savemsh(str(path), self.msh_t)
elif (format == 'vtk'):
savevtk(str(path), self.msh_t)
else:
raise ValueError(f'Unhandled format {format}.') | -6,598,742,017,123,702,000 | Export the mesh object to the disk
Parameters
----------
path : path-like
Path to which the mesh should be exported.
overwrite : bool, default=False
Whether to overwrite, if a file already exists in `path`
format : { 'grd', '2dm', 'msh', 'vtk' }
Format of the export, SMS-2DM or GRD.
Returns
-------
None
Raises
------
ValueError
If specified export format is **not** supported. | ocsmesh/mesh/mesh.py | write | noaa-ocs-modeling/OCSMesh | python | def write(self, path: Union[(str, os.PathLike)], overwrite: bool=False, format: Literal[('grd', '2dm', 'msh', 'vtk')]='grd') -> None:
"Export the mesh object to the disk\n\n Parameters\n ----------\n path : path-like\n Path to which the mesh should be exported.\n overwrite : bool, default=False\n Whether to overwrite, if a file already exists in `path`\n format : { 'grd', '2dm', 'msh', 'vtk' }\n Format of the export, SMS-2DM or GRD.\n\n Returns\n -------\n None\n\n Raises\n ------\n ValueError\n If specified export format is **not** supported.\n "
path = pathlib.Path(path)
if (path.exists() and (overwrite is not True)):
raise IOError(f'File {str(path)} exists and overwrite is not True.')
if (format == 'grd'):
grd_dict = utils.msh_t_to_grd(self.msh_t)
if (self._boundaries and self._boundaries.data):
grd_dict.update(boundaries=self._boundaries.data)
grd.write(grd_dict, path, overwrite)
elif (format == '2dm'):
sms2dm.writer(utils.msh_t_to_2dm(self.msh_t), path, overwrite)
elif (format == 'msh'):
savemsh(str(path), self.msh_t)
elif (format == 'vtk'):
savevtk(str(path), self.msh_t)
else:
raise ValueError(f'Unhandled format {format}.') |
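A brief usage sketch for the export branches above; it assumes `ocsmesh` exposes `Mesh` at the package top level, and the input/output file names are hypothetical:

```python
from pyproj import CRS
from ocsmesh import Mesh  # assumed top-level export

mesh = Mesh.open('fort.14', crs=CRS.from_epsg(4326))        # hypothetical GRD input
mesh.write('mesh_copy.grd', overwrite=True)                 # grd is the default format
mesh.write('mesh_copy.2dm', format='2dm', overwrite=True)   # SMS-2DM
mesh.write('mesh_copy.msh', format='msh', overwrite=True)   # jigsaw msh
mesh.write('mesh_copy.vtk', format='vtk', overwrite=True)   # VTK
```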
@property
def tria3(self):
'Reference to underlying mesh triangle element structure'
return self.msh_t.tria3 | -3,376,814,324,331,499,500 | Reference to underlying mesh triangle element structure | ocsmesh/mesh/mesh.py | tria3 | noaa-ocs-modeling/OCSMesh | python | @property
def tria3(self):
return self.msh_t.tria3 |
@property
def triangles(self):
'Reference to underlying mesh triangle element index array'
return self.msh_t.tria3['index'] | 2,047,632,625,657,273,300 | Reference to underlying mesh triangle element index array | ocsmesh/mesh/mesh.py | triangles | noaa-ocs-modeling/OCSMesh | python | @property
def triangles(self):
return self.msh_t.tria3['index'] |
@property
def quad4(self):
'Reference to underlying mesh quadrangle element structure'
return self.msh_t.quad4 | -9,157,907,240,058,236,000 | Reference to underlying mesh quadrangle element structure | ocsmesh/mesh/mesh.py | quad4 | noaa-ocs-modeling/OCSMesh | python | @property
def quad4(self):
return self.msh_t.quad4 |
@property
def quads(self):
'Reference to underlying mesh quadrangle element index array'
return self.msh_t.quad4['index'] | -2,822,426,976,579,586,600 | Reference to underlying mesh quadrangle element index array | ocsmesh/mesh/mesh.py | quads | noaa-ocs-modeling/OCSMesh | python | @property
def quads(self):
return self.msh_t.quad4['index'] |
@property
def crs(self):
'Reference to underlying mesh crs'
return self.msh_t.crs | -7,577,520,252,728,717,000 | Reference to underlying mesh crs | ocsmesh/mesh/mesh.py | crs | noaa-ocs-modeling/OCSMesh | python | @property
def crs(self):
return self.msh_t.crs |
@property
def hull(self):
'Reference to hull calculator helper object'
if (self._hull is None):
self._hull = Hull(self)
return self._hull | -1,355,562,474,230,493,700 | Reference to hull calculator helper object | ocsmesh/mesh/mesh.py | hull | noaa-ocs-modeling/OCSMesh | python | @property
def hull(self):
if (self._hull is None):
self._hull = Hull(self)
return self._hull |
@property
def nodes(self):
'Reference to node handler helper object'
if (self._nodes is None):
self._nodes = Nodes(self)
return self._nodes | -6,295,479,252,160,770,000 | Reference to node handler helper object | ocsmesh/mesh/mesh.py | nodes | noaa-ocs-modeling/OCSMesh | python | @property
def nodes(self):
if (self._nodes is None):
self._nodes = Nodes(self)
return self._nodes |
@property
def elements(self):
'Reference to element handler helper object'
if (self._elements is None):
self._elements = Elements(self)
return self._elements | 409,851,697,075,555,000 | Reference to element handler helper object | ocsmesh/mesh/mesh.py | elements | noaa-ocs-modeling/OCSMesh | python | @property
def elements(self):
if (self._elements is None):
self._elements = Elements(self)
return self._elements |
def __init__(self, mesh: jigsaw_msh_t) -> None:
'Initialize Euclidean 2D mesh object.\n\n Parameters\n ----------\n mesh : jigsaw_msh_t\n The underlying jigsaw_msh_t object to hold onto mesh data.\n\n Raises\n ------\n ValueError\n If number of mesh dimensions is not equal to ``2``.\n '
super().__init__(mesh)
self._boundaries = None
if (mesh.ndims != (+ 2)):
raise ValueError(f'Argument mesh has property ndims={mesh.ndims}, but expected ndims=2.')
if (len(self.msh_t.value) == 0):
self.msh_t.value = np.array(np.full((self.vert2['coord'].shape[0], 1), np.nan)) | 5,854,192,369,247,189,000 | Initialize Euclidean 2D mesh object.
Parameters
----------
mesh : jigsaw_msh_t
The underlying jigsaw_msh_t object to hold onto mesh data.
Raises
------
ValueError
If number of mesh dimensions is not equal to ``2``. | ocsmesh/mesh/mesh.py | __init__ | noaa-ocs-modeling/OCSMesh | python | def __init__(self, mesh: jigsaw_msh_t) -> None:
'Initialize Euclidean 2D mesh object.\n\n Parameters\n ----------\n mesh : jigsaw_msh_t\n The underlying jigsaw_msh_t object to hold onto mesh data.\n\n Raises\n ------\n ValueError\n If number of mesh dimensions is not equal to ``2``.\n '
super().__init__(mesh)
self._boundaries = None
if (mesh.ndims != (+ 2)):
raise ValueError(f'Argument mesh has property ndims={mesh.ndims}, but expected ndims=2.')
if (len(self.msh_t.value) == 0):
self.msh_t.value = np.array(np.full((self.vert2['coord'].shape[0], 1), np.nan)) |
def get_bbox(self, crs: Union[(str, CRS, None)]=None, output_type: Literal[(None, 'polygon', 'bbox')]=None) -> Union[(Polygon, Bbox)]:
"Get the bounding box of mesh elements.\n\n Parameters\n ----------\n crs : str or CRS or None, default=None\n CRS to transform the calculated bounding box into before\n returning\n output_type : { None, 'polygon', 'bbox'}, default=None\n Output type\n\n Returns\n -------\n Polygon or Bbox\n Bounding box of the mesh elements.\n "
output_type = ('polygon' if (output_type is None) else output_type)
(xmin, xmax) = (np.min(self.coord[:, 0]), np.max(self.coord[:, 0]))
(ymin, ymax) = (np.min(self.coord[:, 1]), np.max(self.coord[:, 1]))
crs = (self.crs if (crs is None) else crs)
if (crs is not None):
if (not self.crs.equals(crs)):
transformer = Transformer.from_crs(self.crs, crs, always_xy=True)
((xmin, xmax), (ymin, ymax)) = transformer.transform((xmin, xmax), (ymin, ymax))
if (output_type == 'polygon'):
return box(xmin, ymin, xmax, ymax)
elif (output_type == 'bbox'):
return Bbox([[xmin, ymin], [xmax, ymax]])
raise TypeError("Argument output_type must a string literal 'polygon' or 'bbox'") | -2,025,501,107,320,347,000 | Get the bounding box of mesh elements.
Parameters
----------
crs : str or CRS or None, default=None
CRS to transform the calculated bounding box into before
returning
output_type : { None, 'polygon', 'bbox'}, default=None
Output type
Returns
-------
Polygon or Bbox
Bounding box of the mesh elements. | ocsmesh/mesh/mesh.py | get_bbox | noaa-ocs-modeling/OCSMesh | python | def get_bbox(self, crs: Union[(str, CRS, None)]=None, output_type: Literal[(None, 'polygon', 'bbox')]=None) -> Union[(Polygon, Bbox)]:
"Get the bounding box of mesh elements.\n\n Parameters\n ----------\n crs : str or CRS or None, default=None\n CRS to transform the calculated bounding box into before\n returning\n output_type : { None, 'polygon', 'bbox'}, default=None\n Output type\n\n Returns\n -------\n Polygon or Bbox\n Bounding box of the mesh elements.\n "
output_type = ('polygon' if (output_type is None) else output_type)
(xmin, xmax) = (np.min(self.coord[:, 0]), np.max(self.coord[:, 0]))
(ymin, ymax) = (np.min(self.coord[:, 1]), np.max(self.coord[:, 1]))
crs = (self.crs if (crs is None) else crs)
if (crs is not None):
if (not self.crs.equals(crs)):
transformer = Transformer.from_crs(self.crs, crs, always_xy=True)
((xmin, xmax), (ymin, ymax)) = transformer.transform((xmin, xmax), (ymin, ymax))
if (output_type == 'polygon'):
return box(xmin, ymin, xmax, ymax)
elif (output_type == 'bbox'):
return Bbox([[xmin, ymin], [xmax, ymax]])
raise TypeError("Argument output_type must a string literal 'polygon' or 'bbox'") |
@property
def boundaries(self):
'Handle to boundaries calculator helper object'
if (self._boundaries is None):
self._boundaries = Boundaries(self)
return self._boundaries | 368,520,437,063,327,040 | Handle to boundaries calculator helper object | ocsmesh/mesh/mesh.py | boundaries | noaa-ocs-modeling/OCSMesh | python | @property
def boundaries(self):
if (self._boundaries is None):
self._boundaries = Boundaries(self)
return self._boundaries |
def tricontourf(self, **kwargs) -> Axes:
'Generate contour for the data of triangular elements of the mesh\n\n Parameters\n ----------\n **kwargs : dict, optional\n Passed to underlying `matplotlib` API.\n\n Returns\n -------\n Axes\n Axes on which the filled contour is drawn.\n '
return utils.tricontourf(self.msh_t, **kwargs) | -141,155,622,674,995,950 | Generate contour for the data of triangular elements of the mesh
Parameters
----------
**kwargs : dict, optional
Passed to underlying `matplotlib` API.
Returns
-------
Axes
Axes on which the filled contour is drawn. | ocsmesh/mesh/mesh.py | tricontourf | noaa-ocs-modeling/OCSMesh | python | def tricontourf(self, **kwargs) -> Axes:
'Generate contour for the data of triangular elements of the mesh\n\n Parameters\n ----------\n **kwargs : dict, optional\n Passed to underlying `matplotlib` API.\n\n Returns\n -------\n Axes\n Axes on which the filled contour is drawn.\n '
return utils.tricontourf(self.msh_t, **kwargs) |
def interpolate(self, raster: Union[(Raster, List[Raster])], method: Literal[('spline', 'linear', 'nearest')]='spline', nprocs: Optional[int]=None, info_out_path: Union[(pathlib.Path, str, None)]=None, filter_by_shape: bool=False) -> None:
"Interplate values from raster inputs to the mesh nodes.\n\n Parameters\n ----------\n raster : Raster or list of Raster\n A single or a list of rasters from which values are\n interpolated onto the mesh\n method : {'spline', 'linear', 'nearest'}, default='spline'\n Method of interpolation.\n nprocs : int or None, default=None\n Number of workers to use when interpolating data.\n info_out_path : pathlike or str or None\n Path for the output node interpolation information file\n filter_by_shape : bool\n Flag for node filtering based on raster bbox or shape\n\n Returns\n -------\n None\n "
if isinstance(raster, Raster):
raster = [raster]
nprocs = ((- 1) if (nprocs is None) else nprocs)
nprocs = (cpu_count() if (nprocs == (- 1)) else nprocs)
if (nprocs > 1):
with Pool(processes=nprocs) as pool:
res = pool.starmap(_mesh_interpolate_worker, [(self.vert2['coord'], self.crs, _raster.tmpfile, _raster.chunk_size, method, filter_by_shape) for _raster in raster])
pool.join()
else:
res = [_mesh_interpolate_worker(self.vert2['coord'], self.crs, _raster.tmpfile, _raster.chunk_size, method, filter_by_shape) for _raster in raster]
values = self.msh_t.value.flatten()
interp_info_map = {}
for ((mask, _values), rast) in zip(res, raster):
values[mask] = _values
if (info_out_path is not None):
vert_cs = None
rast_crs = rast.crs
if rast_crs.is_vertical:
if (rast_crs.sub_crs_list is not None):
for sub_crs in rast_crs.sub_crs_list:
if sub_crs.is_vertical:
vert_cs = sub_crs
elif (rast_crs.source_crs is not None):
if rast_crs.source_crs.is_vertical:
vert_cs = rast_crs.source_crs
vert_cs_name = vert_cs.name
idxs = np.argwhere(mask).ravel()
interp_info_map.update({idx: (rast.path, vert_cs_name) for idx in idxs})
if (info_out_path is not None):
coords = self.msh_t.vert2['coord'].copy()
geo_coords = coords.copy()
if (not self.crs.is_geographic):
transformer = Transformer.from_crs(self.crs, CRS.from_epsg(4326), always_xy=True)
(geo_coords[:, 0], geo_coords[:, 1]) = transformer.transform(coords[:, 0], coords[:, 1])
vd_idxs = np.array(list(interp_info_map.keys()))
df_interp_info = pd.DataFrame(index=vd_idxs, data={'x': coords[(vd_idxs, 0)], 'y': coords[(vd_idxs, 1)], 'lat': geo_coords[(vd_idxs, 0)], 'lon': geo_coords[(vd_idxs, 1)], 'elev': values[vd_idxs], 'crs': [i[1] for i in interp_info_map.values()], 'source': [i[0] for i in interp_info_map.values()]})
df_interp_info.sort_index().to_csv(info_out_path, header=False, index=True)
self.msh_t.value = np.array(values.reshape((values.shape[0], 1)), dtype=jigsaw_msh_t.REALS_t) | 9,192,351,401,424,472,000 | Interpolate values from raster inputs to the mesh nodes.
Parameters
----------
raster : Raster or list of Raster
A single or a list of rasters from which values are
interpolated onto the mesh
method : {'spline', 'linear', 'nearest'}, default='spline'
Method of interpolation.
nprocs : int or None, default=None
Number of workers to use when interpolating data.
info_out_path : pathlike or str or None
Path for the output node interpolation information file
filter_by_shape : bool
Flag for node filtering based on raster bbox or shape
Returns
-------
None | ocsmesh/mesh/mesh.py | interpolate | noaa-ocs-modeling/OCSMesh | python | def interpolate(self, raster: Union[(Raster, List[Raster])], method: Literal[('spline', 'linear', 'nearest')]='spline', nprocs: Optional[int]=None, info_out_path: Union[(pathlib.Path, str, None)]=None, filter_by_shape: bool=False) -> None:
"Interplate values from raster inputs to the mesh nodes.\n\n Parameters\n ----------\n raster : Raster or list of Raster\n A single or a list of rasters from which values are\n interpolated onto the mesh\n method : {'spline', 'linear', 'nearest'}, default='spline'\n Method of interpolation.\n nprocs : int or None, default=None\n Number of workers to use when interpolating data.\n info_out_path : pathlike or str or None\n Path for the output node interpolation information file\n filter_by_shape : bool\n Flag for node filtering based on raster bbox or shape\n\n Returns\n -------\n None\n "
if isinstance(raster, Raster):
raster = [raster]
nprocs = ((- 1) if (nprocs is None) else nprocs)
nprocs = (cpu_count() if (nprocs == (- 1)) else nprocs)
if (nprocs > 1):
with Pool(processes=nprocs) as pool:
res = pool.starmap(_mesh_interpolate_worker, [(self.vert2['coord'], self.crs, _raster.tmpfile, _raster.chunk_size, method, filter_by_shape) for _raster in raster])
pool.join()
else:
res = [_mesh_interpolate_worker(self.vert2['coord'], self.crs, _raster.tmpfile, _raster.chunk_size, method, filter_by_shape) for _raster in raster]
values = self.msh_t.value.flatten()
interp_info_map = {}
for ((mask, _values), rast) in zip(res, raster):
values[mask] = _values
if (info_out_path is not None):
vert_cs = None
rast_crs = rast.crs
if rast_crs.is_vertical:
if (rast_crs.sub_crs_list is not None):
for sub_crs in rast_crs.sub_crs_list:
if sub_crs.is_vertical:
vert_cs = sub_crs
elif (rast_crs.source_crs is not None):
if rast_crs.source_crs.is_vertical:
vert_cs = rast_crs.source_crs
vert_cs_name = vert_cs.name
idxs = np.argwhere(mask).ravel()
interp_info_map.update({idx: (rast.path, vert_cs_name) for idx in idxs})
if (info_out_path is not None):
coords = self.msh_t.vert2['coord'].copy()
geo_coords = coords.copy()
if (not self.crs.is_geographic):
transformer = Transformer.from_crs(self.crs, CRS.from_epsg(4326), always_xy=True)
(geo_coords[:, 0], geo_coords[:, 1]) = transformer.transform(coords[:, 0], coords[:, 1])
vd_idxs = np.array(list(interp_info_map.keys()))
df_interp_info = pd.DataFrame(index=vd_idxs, data={'x': coords[(vd_idxs, 0)], 'y': coords[(vd_idxs, 1)], 'lat': geo_coords[(vd_idxs, 0)], 'lon': geo_coords[(vd_idxs, 1)], 'elev': values[vd_idxs], 'crs': [i[1] for i in interp_info_map.values()], 'source': [i[0] for i in interp_info_map.values()]})
df_interp_info.sort_index().to_csv(info_out_path, header=False, index=True)
self.msh_t.value = np.array(values.reshape((values.shape[0], 1)), dtype=jigsaw_msh_t.REALS_t) |
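A sketch of interpolating DEM values onto the mesh nodes, reusing `mesh` from the earlier sketch; the raster file name is hypothetical and `Raster` is assumed to be exported at the package top level (it is imported by this module):

```python
from ocsmesh import Raster  # assumed top-level export

dem = Raster('gebco_tile.tif')  # hypothetical DEM raster
mesh.interpolate(dem, method='linear', nprocs=2)
print(mesh.value.min(), mesh.value.max())
```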
def get_contour(self, level: float) -> LineString:
'Extract contour lines at the specified `level` from mesh values\n\n Parameters\n ----------\n level : float\n The level at which contour lines must be extracted.\n\n Returns\n -------\n LineString\n Extracted and merged contour lines.\n\n Raises\n ------\n ValueError\n If mesh has nodes that have null value `np.nan`.\n '
for attr in ['quad4', 'hexa8']:
if (len(getattr(self.msh_t, attr)) > 0):
warnings.warn('Mesh contour extraction only supports triangles')
coords = self.msh_t.vert2['coord']
values = self.msh_t.value
trias = self.msh_t.tria3['index']
if np.any(np.isnan(values)):
raise ValueError('Mesh contains invalid values. Raster values must be interpolated to the mesh before generating boundaries.')
(x, y) = (coords[:, 0], coords[:, 1])
features = []
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
_logger.debug('Computing contours...')
(fig, ax) = plt.subplots()
ax.tricontour(x, y, trias, values.ravel(), levels=[level])
plt.close(fig)
for path_collection in ax.collections:
for path in path_collection.get_paths():
try:
features.append(LineString(path.vertices))
except ValueError:
pass
return linemerge(features) | 8,760,211,111,063,419,000 | Extract contour lines at the specified `level` from mesh values
Parameters
----------
level : float
The level at which contour lines must be extracted.
Returns
-------
LineString
Extracted and merged contour lines.
Raises
------
ValueError
If mesh has nodes that have null value `np.nan`. | ocsmesh/mesh/mesh.py | get_contour | noaa-ocs-modeling/OCSMesh | python | def get_contour(self, level: float) -> LineString:
'Extract contour lines at the specified `level` from mesh values\n\n Parameters\n ----------\n level : float\n The level at which contour lines must be extracted.\n\n Returns\n -------\n LineString\n Extracted and merged contour lines.\n\n Raises\n ------\n ValueError\n If mesh has nodes that have null value `np.nan`.\n '
for attr in ['quad4', 'hexa8']:
if (len(getattr(self.msh_t, attr)) > 0):
warnings.warn('Mesh contour extraction only supports triangles')
coords = self.msh_t.vert2['coord']
values = self.msh_t.value
trias = self.msh_t.tria3['index']
if np.any(np.isnan(values)):
raise ValueError('Mesh contains invalid values. Raster values must be interpolated to the mesh before generating boundaries.')
(x, y) = (coords[:, 0], coords[:, 1])
features = []
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
_logger.debug('Computing contours...')
(fig, ax) = plt.subplots()
ax.tricontour(x, y, trias, values.ravel(), levels=[level])
plt.close(fig)
for path_collection in ax.collections:
for path in path_collection.get_paths():
try:
features.append(LineString(path.vertices))
except ValueError:
pass
return linemerge(features) |
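Once raster values have been interpolated onto the nodes (as in the previous sketch), a contour at a given level can be pulled out; zero is used here only as an example level:

```python
# Merged contour lines at elevation 0.0 (e.g. an approximate coastline).
coastline = mesh.get_contour(level=0.0)
print(coastline.length)
```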
def get_multipolygon(self, zmin: Optional[float]=None, zmax: Optional[float]=None) -> MultiPolygon:
'Calculate multipolygon covering mesh elements (hull)\n\n Parameters\n ----------\n zmin : float or None\n Minimum elevation to consider for multipolygon extraction\n zmax : float or None\n Maximum elevation to consider for multipolygon extraction\n\n Returns\n -------\n MultiPolygon\n Calculated multipolygon shape\n '
values = self.msh_t.value
mask = np.ones(values.shape)
if (zmin is not None):
mask = np.logical_and(mask, (values > zmin))
if (zmax is not None):
mask = np.logical_and(mask, (values < zmax))
verts_in = np.argwhere(mask).ravel()
clipped_mesh = utils.clip_mesh_by_vertex(self.msh_t, verts_in, can_use_other_verts=True)
boundary_edges = utils.get_boundary_edges(clipped_mesh)
coords = clipped_mesh.vert2['coord']
coo_to_idx = {tuple(coo): idx for (idx, coo) in enumerate(coords)}
poly_gen = polygonize(coords[boundary_edges])
polys = list(poly_gen)
polys = sorted(polys, key=(lambda p: p.area), reverse=True)
rings = [p.exterior for p in polys]
n_parents = np.zeros((len(rings),))
represent = np.array([r.coords[0] for r in rings])
for (e, ring) in enumerate(rings[:(- 1)]):
path = Path(ring.coords, closed=True)
n_parents = (n_parents + np.pad(np.array([path.contains_point(pt) for pt in represent[(e + 1):]]), ((e + 1), 0), 'constant', constant_values=0))
polys = [p for (e, p) in enumerate(polys) if (not (n_parents[e] % 2))]
return MultiPolygon(polys) | -5,949,840,052,573,475,000 | Calculate multipolygon covering mesh elements (hull)
Parameters
----------
zmin : float or None
Minimum elevation to consider for multipolygon extraction
zmax : float or None
Maximum elevation to consider for multipolygon extraction
Returns
-------
MultiPolygon
Calculated multipolygon shape | ocsmesh/mesh/mesh.py | get_multipolygon | noaa-ocs-modeling/OCSMesh | python | def get_multipolygon(self, zmin: Optional[float]=None, zmax: Optional[float]=None) -> MultiPolygon:
'Calculate multipolygon covering mesh elements (hull)\n\n Parameters\n ----------\n zmin : float or None\n Minimum elevation to consider for multipolygon extraction\n zmax : float or None\n Maximum elevation to consider for multipolygon extraction\n\n Returns\n -------\n MultiPolygon\n Calculated multipolygon shape\n '
values = self.msh_t.value
mask = np.ones(values.shape)
if (zmin is not None):
mask = np.logical_and(mask, (values > zmin))
if (zmax is not None):
mask = np.logical_and(mask, (values < zmax))
verts_in = np.argwhere(mask).ravel()
clipped_mesh = utils.clip_mesh_by_vertex(self.msh_t, verts_in, can_use_other_verts=True)
boundary_edges = utils.get_boundary_edges(clipped_mesh)
coords = clipped_mesh.vert2['coord']
coo_to_idx = {tuple(coo): idx for (idx, coo) in enumerate(coords)}
poly_gen = polygonize(coords[boundary_edges])
polys = list(poly_gen)
polys = sorted(polys, key=(lambda p: p.area), reverse=True)
rings = [p.exterior for p in polys]
n_parents = np.zeros((len(rings),))
represent = np.array([r.coords[0] for r in rings])
for (e, ring) in enumerate(rings[:(- 1)]):
path = Path(ring.coords, closed=True)
n_parents = (n_parents + np.pad(np.array([path.contains_point(pt) for pt in represent[(e + 1):]]), ((e + 1), 0), 'constant', constant_values=0))
polys = [p for (e, p) in enumerate(polys) if (not (n_parents[e] % 2))]
return MultiPolygon(polys) |
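Similarly, the hull restricted by elevation can be extracted; the `zmax` value below is only illustrative:

```python
# Multipolygon covering elements whose node values are below 0.0.
wet_hull = mesh.get_multipolygon(zmax=0.0)
print(wet_hull.area, len(wet_hull.geoms))
```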
@property
def vert2(self):
'Reference to underlying mesh 2D vertices structure'
return self.msh_t.vert2 | 315,863,749,805,218,560 | Reference to underlying mesh 2D vertices structure | ocsmesh/mesh/mesh.py | vert2 | noaa-ocs-modeling/OCSMesh | python | @property
def vert2(self):
return self.msh_t.vert2 |
@property
def value(self):
'Reference to underlying mesh values'
return self.msh_t.value | 541,604,266,528,932,800 | Reference to underlying mesh values | ocsmesh/mesh/mesh.py | value | noaa-ocs-modeling/OCSMesh | python | @property
def value(self):
return self.msh_t.value |
@property
def bbox(self):
'Calculates and returns bounding box of the mesh hull.\n\n See Also\n --------\n get_bbox\n '
return self.get_bbox() | -1,139,814,721,322,906,000 | Calculates and returns bounding box of the mesh hull.
See Also
--------
get_bbox | ocsmesh/mesh/mesh.py | bbox | noaa-ocs-modeling/OCSMesh | python | @property
def bbox(self):
'Calculates and returns bounding box of the mesh hull.\n\n See Also\n --------\n get_bbox\n '
return self.get_bbox() |
def __new__(cls, mesh: jigsaw_msh_t) -> MeshType:
'Construct a concrete mesh object.\n\n Parameters\n ----------\n mesh : jigsaw_msh_t\n Input jigsaw mesh object\n\n Returns\n -------\n MeshType\n Mesh object created from the input\n\n Raises\n ------\n TypeError\n Input `mesh` is not a `jigsaw_msh_t` object.\n NotImplementedError\n Input `mesh` object cannot be used to create a EuclideanMesh2D\n '
if (not isinstance(mesh, jigsaw_msh_t)):
raise TypeError(f'Argument mesh must be of type {jigsaw_msh_t}, not type {type(mesh)}.')
if (mesh.mshID == 'euclidean-mesh'):
if (mesh.ndims == 2):
return EuclideanMesh2D(mesh)
raise NotImplementedError(f'mshID={mesh.mshID} + mesh.ndims={mesh.ndims} not handled.')
raise NotImplementedError(f'mshID={mesh.mshID} not handled.') | 5,430,746,854,186,812,000 | Construct a concrete mesh object.
Parameters
----------
mesh : jigsaw_msh_t
Input jigsaw mesh object
Returns
-------
MeshType
Mesh object created from the input
Raises
------
TypeError
Input `mesh` is not a `jigsaw_msh_t` object.
NotImplementedError
Input `mesh` object cannot be used to create a EuclideanMesh2D | ocsmesh/mesh/mesh.py | __new__ | noaa-ocs-modeling/OCSMesh | python | def __new__(cls, mesh: jigsaw_msh_t) -> MeshType:
'Construct a concrete mesh object.\n\n Parameters\n ----------\n mesh : jigsaw_msh_t\n Input jigsaw mesh object\n\n Returns\n -------\n MeshType\n Mesh object created from the input\n\n Raises\n ------\n TypeError\n Input `mesh` is not a `jigsaw_msh_t` object.\n NotImplementedError\n Input `mesh` object cannot be used to create a EuclideanMesh2D\n '
if (not isinstance(mesh, jigsaw_msh_t)):
raise TypeError(f'Argument mesh must be of type {jigsaw_msh_t}, not type {type(mesh)}.')
if (mesh.mshID == 'euclidean-mesh'):
if (mesh.ndims == 2):
return EuclideanMesh2D(mesh)
raise NotImplementedError(f'mshID={mesh.mshID} + mesh.ndims={mesh.ndims} not handled.')
raise NotImplementedError(f'mshID={mesh.mshID} not handled.') |
@staticmethod
def open(path: Union[(str, Path)], crs: Optional[CRS]=None) -> MeshType:
'Read mesh from a file on disk\n\n Parameters\n ----------\n path : path-like\n Path to the file containig mesh.\n crs : CRS or None, default=None\n CRS of the mesh in the path. Overwrites any info read\n from file, no transformation is done.\n\n Returns\n -------\n MeshType\n Mesh object created by reading the file.\n\n Raises\n ------\n TypeError\n If cannot determine the input mesh type.\n\n Notes\n -----\n Currently only SMS-2DM and GRD formats are supported for\n reading.\n '
try:
msh_t = utils.grd_to_msh_t(grd.read(path, crs=crs))
msh_t.value = np.negative(msh_t.value)
return Mesh(msh_t)
except Exception as e:
if ('not a valid grd file' in str(e)):
pass
else:
raise e
try:
return Mesh(utils.sms2dm_to_msh_t(sms2dm.read(path, crs=crs)))
except ValueError:
pass
try:
msh_t = jigsaw_msh_t()
loadmsh(msh_t, path)
msh_t.crs = crs
return Mesh(msh_t)
except Exception as e:
pass
raise TypeError(f'Unable to automatically determine file type for {str(path)}.') | -5,015,466,048,379,708,000 | Read mesh from a file on disk
Parameters
----------
path : path-like
Path to the file containing the mesh.
crs : CRS or None, default=None
CRS of the mesh in the path. Overwrites any info read
from file, no transformation is done.
Returns
-------
MeshType
Mesh object created by reading the file.
Raises
------
TypeError
If cannot determine the input mesh type.
Notes
-----
Currently only SMS-2DM and GRD formats are supported for
reading. | ocsmesh/mesh/mesh.py | open | noaa-ocs-modeling/OCSMesh | python | @staticmethod
def open(path: Union[(str, Path)], crs: Optional[CRS]=None) -> MeshType:
'Read mesh from a file on disk\n\n Parameters\n ----------\n path : path-like\n Path to the file containig mesh.\n crs : CRS or None, default=None\n CRS of the mesh in the path. Overwrites any info read\n from file, no transformation is done.\n\n Returns\n -------\n MeshType\n Mesh object created by reading the file.\n\n Raises\n ------\n TypeError\n If cannot determine the input mesh type.\n\n Notes\n -----\n Currently only SMS-2DM and GRD formats are supported for\n reading.\n '
try:
msh_t = utils.grd_to_msh_t(grd.read(path, crs=crs))
msh_t.value = np.negative(msh_t.value)
return Mesh(msh_t)
except Exception as e:
if ('not a valid grd file' in str(e)):
pass
else:
raise e
try:
return Mesh(utils.sms2dm_to_msh_t(sms2dm.read(path, crs=crs)))
except ValueError:
pass
try:
msh_t = jigsaw_msh_t()
loadmsh(msh_t, path)
msh_t.crs = crs
return Mesh(msh_t)
except Exception as e:
pass
raise TypeError(f'Unable to automatically determine file type for {str(path)}.') |
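A final sketch of the format auto-detection described above, this time with a hypothetical SMS-2DM file and an explicit CRS override:

```python
from pyproj import CRS
from ocsmesh import Mesh  # assumed top-level export

mesh2dm = Mesh.open('grid.2dm', crs=CRS.from_epsg(4326))  # hypothetical SMS-2DM input
print(mesh2dm.crs)                    # the CRS passed in; no transformation is applied
print(mesh2dm.vert2['coord'].shape)   # (n_nodes, 2)
```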
def __init__(self, mesh: EuclideanMesh) -> None:
'Initializes the ring calculator object for the input `mesh`\n\n Parameters\n ----------\n mesh : EuclideanMesh\n Input mesh for which this object calculates rings.\n '
self.mesh = mesh | 918,915,827,880,436,700 | Initializes the ring calculator object for the input `mesh`
Parameters
----------
mesh : EuclideanMesh
Input mesh for which this object calculates rings. | ocsmesh/mesh/mesh.py | __init__ | noaa-ocs-modeling/OCSMesh | python | def __init__(self, mesh: EuclideanMesh) -> None:
'Initializes the ring calculator object for the input `mesh`\n\n Parameters\n ----------\n mesh : EuclideanMesh\n Input mesh for which this object calculates rings.\n '
self.mesh = mesh |
@lru_cache(maxsize=1)
def __call__(self) -> gpd.GeoDataFrame:
"Calcluates all the polygons of the mesh and extracts its rings.\n\n Parameters\n ----------\n\n Returns\n -------\n gpd.GeoDataFrame\n Dataframe containing all rings of the mesh hull polygon.\n The rings are in the form of `shapely.geometry.LinearRing`.\n\n Notes\n -----\n The result of this method is cached, so that multiple calls\n to it won't result in multiple calculations. If the mesh\n is modified and the cache is not properly clear the calls\n to this method can result in invalid return values.\n "
polys = utils.get_mesh_polygons(self.mesh.msh_t)
data = []
bnd_id = 0
for poly in polys:
data.append({'geometry': poly.exterior, 'bnd_id': bnd_id, 'type': 'exterior'})
for interior in poly.interiors:
data.append({'geometry': interior, 'bnd_id': bnd_id, 'type': 'interior'})
bnd_id = (bnd_id + 1)
return gpd.GeoDataFrame(data, crs=self.mesh.crs) | 273,310,606,786,646,370 | Calculates all the polygons of the mesh and extracts their rings.
Parameters
----------
Returns
-------
gpd.GeoDataFrame
Dataframe containing all rings of the mesh hull polygon.
The rings are in the form of `shapely.geometry.LinearRing`.
Notes
-----
The result of this method is cached, so that multiple calls
to it won't result in multiple calculations. If the mesh
is modified and the cache is not properly cleared, the calls
to this method can result in invalid return values. | ocsmesh/mesh/mesh.py | __call__ | noaa-ocs-modeling/OCSMesh | python | @lru_cache(maxsize=1)
def __call__(self) -> gpd.GeoDataFrame:
"Calcluates all the polygons of the mesh and extracts its rings.\n\n Parameters\n ----------\n\n Returns\n -------\n gpd.GeoDataFrame\n Dataframe containing all rings of the mesh hull polygon.\n The rings are in the form of `shapely.geometry.LinearRing`.\n\n Notes\n -----\n The result of this method is cached, so that multiple calls\n to it won't result in multiple calculations. If the mesh\n is modified and the cache is not properly clear the calls\n to this method can result in invalid return values.\n "
polys = utils.get_mesh_polygons(self.mesh.msh_t)
data = []
bnd_id = 0
for poly in polys:
data.append({'geometry': poly.exterior, 'bnd_id': bnd_id, 'type': 'exterior'})
for interior in poly.interiors:
data.append({'geometry': interior, 'bnd_id': bnd_id, 'type': 'interior'})
bnd_id = (bnd_id + 1)
return gpd.GeoDataFrame(data, crs=self.mesh.crs) |
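The loop in the record above splits every mesh polygon into its exterior shell and any interior holes. The standalone sketch below reproduces that split on a toy `shapely` polygon; it is illustrative only and does not touch ocsmesh.

from shapely.geometry import Polygon

poly = Polygon(
    [(0, 0), (4, 0), (4, 4), (0, 4)],      # outer boundary of the domain
    [[(1, 1), (2, 1), (2, 2), (1, 2)]],    # one hole, i.e. an island
)
rows = [{'bnd_id': 0, 'type': 'exterior', 'geometry': poly.exterior}]
rows += [{'bnd_id': 0, 'type': 'interior', 'geometry': ring} for ring in poly.interiors]
print([row['type'] for row in rows])       # ['exterior', 'interior']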
def exterior(self) -> gpd.GeoDataFrame:
'Extracts the exterior ring from the results of `__call__`.\n\n Parameters\n ----------\n\n Returns\n -------\n gpd.GeoDataFrame\n Dataframe containing exterior ring of the mesh hull polygon.\n '
return self().loc[(self()['type'] == 'exterior')] | 8,858,591,886,772,095,000 | Extracts the exterior ring from the results of `__call__`.
Parameters
----------
Returns
-------
gpd.GeoDataFrame
Dataframe containing exterior ring of the mesh hull polygon. | ocsmesh/mesh/mesh.py | exterior | noaa-ocs-modeling/OCSMesh | python | def exterior(self) -> gpd.GeoDataFrame:
'Extracts the exterior ring from the results of `__call__`.\n\n Parameters\n ----------\n\n Returns\n -------\n gpd.GeoDataFrame\n Dataframe containing exterior ring of the mesh hull polygon.\n '
return self().loc[(self()['type'] == 'exterior')] |
def interior(self) -> gpd.GeoDataFrame:
'Extracts the interior rings from the results of `__call__`.\n\n Parameters\n ----------\n\n Returns\n -------\n gpd.GeoDataFrame\n Dataframe containing interior rings of the mesh hull polygon.\n '
return self().loc[(self()['type'] == 'interior')] | 8,726,658,138,874,750,000 | Extracts the interior rings from the results of `__call__`.
Parameters
----------
Returns
-------
gpd.GeoDataFrame
Dataframe containing interior rings of the mesh hull polygon. | ocsmesh/mesh/mesh.py | interior | noaa-ocs-modeling/OCSMesh | python | def interior(self) -> gpd.GeoDataFrame:
'Extracts the interior rings from the results of `__call__`.\n\n Parameters\n ----------\n\n Returns\n -------\n gpd.GeoDataFrame\n Dataframe containing interior rings of the mesh hull polygon.\n '
return self().loc[(self()['type'] == 'interior')] |
def __init__(self, mesh: EuclideanMesh) -> None:
'Initializes the edge calculator object for the input `mesh`\n\n Parameters\n ----------\n mesh : EuclideanMesh\n Input mesh for which boundary edges are calculated.\n '
self.mesh = mesh | 1,966,865,381,158,875,600 | Initializes the edge calculator object for the input `mesh`
Parameters
----------
mesh : EuclideanMesh
Input mesh for which boundary edges are calculated. | ocsmesh/mesh/mesh.py | __init__ | noaa-ocs-modeling/OCSMesh | python | def __init__(self, mesh: EuclideanMesh) -> None:
'Initializes the edge calculator object for the input `mesh`\n\n Parameters\n ----------\n mesh : EuclideanMesh\n Input mesh for which boundary edges are calculated.\n '
self.mesh = mesh |
@lru_cache(maxsize=1)
def __call__(self) -> gpd.GeoDataFrame:
"Calculates all boundary edges for the mesh.\n\n Parameters\n ----------\n\n Returns\n -------\n gpd.GeoDataFrame\n Dataframe containing all boundary edges of the mesh in\n the form of `shapely.geometry.LineString` for each\n coordinate couple.\n\n Notes\n -----\n The result of this method is cached, so that multiple calls\n to it won't result in multiple calculations. If the mesh\n is modified and the cache is not properly clear the calls\n to this method can result in invalid return values.\n "
data = []
for ring in self.mesh.hull.rings().itertuples():
coords = ring.geometry.coords
for i in range(1, len(coords)):
data.append({'geometry': LineString([coords[(i - 1)], coords[i]]), 'bnd_id': ring.bnd_id, 'type': ring.type})
return gpd.GeoDataFrame(data, crs=self.mesh.crs) | -2,760,478,559,937,339,000 | Calculates all boundary edges for the mesh.
Parameters
----------
Returns
-------
gpd.GeoDataFrame
Dataframe containing all boundary edges of the mesh in
the form of `shapely.geometry.LineString` for each
coordinate couple.
Notes
-----
The result of this method is cached, so that multiple calls
to it won't result in multiple calculations. If the mesh
is modified and the cache is not properly cleared, the calls
to this method can result in invalid return values. | ocsmesh/mesh/mesh.py | __call__ | noaa-ocs-modeling/OCSMesh | python | @lru_cache(maxsize=1)
def __call__(self) -> gpd.GeoDataFrame:
"Calculates all boundary edges for the mesh.\n\n Parameters\n ----------\n\n Returns\n -------\n gpd.GeoDataFrame\n Dataframe containing all boundary edges of the mesh in\n the form of `shapely.geometry.LineString` for each\n coordinate couple.\n\n Notes\n -----\n The result of this method is cached, so that multiple calls\n to it won't result in multiple calculations. If the mesh\n is modified and the cache is not properly clear the calls\n to this method can result in invalid return values.\n "
data = []
for ring in self.mesh.hull.rings().itertuples():
coords = ring.geometry.coords
for i in range(1, len(coords)):
data.append({'geometry': LineString([coords[(i - 1)], coords[i]]), 'bnd_id': ring.bnd_id, 'type': ring.type})
return gpd.GeoDataFrame(data, crs=self.mesh.crs) |
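The record above cuts each ring into two-point segments. The toy snippet below shows the same decomposition on a square ring, independent of any mesh.

from shapely.geometry import LinearRing, LineString

ring = LinearRing([(0, 0), (1, 0), (1, 1), (0, 1)])
coords = list(ring.coords)                # a closed ring repeats the first point at the end
edges = [LineString([coords[i - 1], coords[i]]) for i in range(1, len(coords))]
print(len(edges))                         # 4 edges for the square ring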
def exterior(self) -> gpd.GeoDataFrame:
'Returns exterior boundary edges from the results of `__call__`\n\n        Parameters\n        ----------\n\n        Returns\n        -------\n        gpd.GeoDataFrame\n            Dataframe containing exterior boundary edges of the mesh in\n            the form of line string couples.\n        '
return self().loc[(self()['type'] == 'exterior')] | 8,433,150,425,821,374,000 | Returns exterior boundary edges from the results of `__call__`
Parameters
----------
Returns
-------
gpd.GeoDataFrame
Dataframe containing exterior boundary edges of the mesh in
the form of line string couples. | ocsmesh/mesh/mesh.py | exterior | noaa-ocs-modeling/OCSMesh | python | def exterior(self) -> gpd.GeoDataFrame:
'Returns exterior boundary edges from the results of `__call__`\n\n        Parameters\n        ----------\n\n        Returns\n        -------\n        gpd.GeoDataFrame\n            Dataframe containing exterior boundary edges of the mesh in\n            the form of line string couples.\n        '
return self().loc[(self()['type'] == 'exterior')] |
def interior(self) -> gpd.GeoDataFrame:
'Returns interior boundary edges from the results of `__call__`\n\n        Parameters\n        ----------\n\n        Returns\n        -------\n        gpd.GeoDataFrame\n            Dataframe containing interior boundary edges of the mesh in\n            the form of line string couples.\n        '
return self().loc[(self()['type'] == 'interior')] | 4,926,577,126,765,426,000 | Returns interior boundary edges from the results of `__call__`
Parameters
----------
Returns
-------
gpd.GeoDataFrame
Dataframe containing interior boundary edges of the mesh in
the form of line string couples. | ocsmesh/mesh/mesh.py | interior | noaa-ocs-modeling/OCSMesh | python | def interior(self) -> gpd.GeoDataFrame:
'Returns interior boundary edges from the results of `__call__`\n\n        Parameters\n        ----------\n\n        Returns\n        -------\n        gpd.GeoDataFrame\n            Dataframe containing interior boundary edges of the mesh in\n            the form of line string couples.\n        '
return self().loc[(self()['type'] == 'interior')] |
def __init__(self, mesh: EuclideanMesh) -> None:
'Initialize helper class for handling mesh hull calculations\n\n Parameters\n ----------\n mesh : EuclideanMesh\n Input mesh for which hull calculations are done.\n\n Notes\n -----\n This object holds onto the ring and edge calculator objects\n as well as a reference to the input mesh.\n '
self.mesh = mesh
self.rings = Rings(mesh)
self.edges = Edges(mesh) | -3,874,946,093,954,868,700 | Initialize helper class for handling mesh hull calculations
Parameters
----------
mesh : EuclideanMesh
Input mesh for which hull calculations are done.
Notes
-----
This object holds onto the ring and edge calculator objects
as well as a reference to the input mesh. | ocsmesh/mesh/mesh.py | __init__ | noaa-ocs-modeling/OCSMesh | python | def __init__(self, mesh: EuclideanMesh) -> None:
'Initialize helper class for handling mesh hull calculations\n\n Parameters\n ----------\n mesh : EuclideanMesh\n Input mesh for which hull calculations are done.\n\n Notes\n -----\n This object holds onto the ring and edge calculator objects\n as well as a reference to the input mesh.\n '
self.mesh = mesh
self.rings = Rings(mesh)
self.edges = Edges(mesh) |
@lru_cache(maxsize=1)
def __call__(self) -> gpd.GeoDataFrame:
"Calculates all polygons of the mesh including domain islands\n\n Parameters\n ----------\n\n Returns\n -------\n gpd.GeoDataFrame\n Dataframe containing all polygons of the mesh.\n\n See Also\n --------\n implode()\n Dataframe with a single combined multipolygon.\n multipolygon()\n `shapely` multipolygon shape of combined mesh polygons.\n\n Notes\n -----\n The result of this method is cached, so that multiple calls\n to it won't result in multiple calculations. If the mesh\n is modified and the cache is not properly clear the calls\n to this method can result in invalid return values.\n "
data = []
for bnd_id in np.unique(self.rings()['bnd_id'].tolist()):
exterior = self.rings().loc[((self.rings()['bnd_id'] == bnd_id) & (self.rings()['type'] == 'exterior'))]
interiors = self.rings().loc[((self.rings()['bnd_id'] == bnd_id) & (self.rings()['type'] == 'interior'))]
data.append({'geometry': Polygon(exterior.iloc[0].geometry.coords, [row.geometry.coords for (_, row) in interiors.iterrows()]), 'bnd_id': bnd_id})
return gpd.GeoDataFrame(data, crs=self.mesh.crs) | -3,732,285,811,006,009,300 | Calculates all polygons of the mesh including domain islands
Parameters
----------
Returns
-------
gpd.GeoDataFrame
Dataframe containing all polygons of the mesh.
See Also
--------
implode()
Dataframe with a single combined multipolygon.
multipolygon()
`shapely` multipolygon shape of combined mesh polygons.
Notes
-----
The result of this method is cached, so that multiple calls
to it won't result in multiple calculations. If the mesh
is modified and the cache is not properly cleared, the calls
to this method can result in invalid return values. | ocsmesh/mesh/mesh.py | __call__ | noaa-ocs-modeling/OCSMesh | python | @lru_cache(maxsize=1)
def __call__(self) -> gpd.GeoDataFrame:
"Calculates all polygons of the mesh including domain islands\n\n Parameters\n ----------\n\n Returns\n -------\n gpd.GeoDataFrame\n Dataframe containing all polygons of the mesh.\n\n See Also\n --------\n implode()\n Dataframe with a single combined multipolygon.\n multipolygon()\n `shapely` multipolygon shape of combined mesh polygons.\n\n Notes\n -----\n The result of this method is cached, so that multiple calls\n to it won't result in multiple calculations. If the mesh\n is modified and the cache is not properly clear the calls\n to this method can result in invalid return values.\n "
data = []
for bnd_id in np.unique(self.rings()['bnd_id'].tolist()):
exterior = self.rings().loc[((self.rings()['bnd_id'] == bnd_id) & (self.rings()['type'] == 'exterior'))]
interiors = self.rings().loc[((self.rings()['bnd_id'] == bnd_id) & (self.rings()['type'] == 'interior'))]
data.append({'geometry': Polygon(exterior.iloc[0].geometry.coords, [row.geometry.coords for (_, row) in interiors.iterrows()]), 'bnd_id': bnd_id})
return gpd.GeoDataFrame(data, crs=self.mesh.crs) |
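The record above stitches each exterior ring back together with its holes through `Polygon(shell, holes)`. A toy reconstruction with made-up coordinates:

import geopandas as gpd
from shapely.geometry import Polygon

shell = [(0, 0), (5, 0), (5, 5), (0, 5)]
hole = [(2, 2), (3, 2), (3, 3), (2, 3)]
gdf = gpd.GeoDataFrame([{'bnd_id': 0, 'geometry': Polygon(shell, [hole])}], crs='EPSG:4326')
print(len(gdf.geometry.iloc[0].interiors))   # 1 -- the hole survives the round trip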
def exterior(self) -> gpd.GeoDataFrame:
'Creates polygons from exterior rings of the mesh hull\n\n Parameters\n ----------\n\n Returns\n -------\n gpd.GeoDataFrame\n Polygons created from exterior rings of the mesh hull\n '
data = []
for exterior in self.rings().loc[(self.rings()['type'] == 'exterior')].itertuples():
data.append({'geometry': Polygon(exterior.geometry.coords)})
return gpd.GeoDataFrame(data, crs=self.mesh.crs) | -2,137,415,256,579,247,900 | Creates polygons from exterior rings of the mesh hull
Parameters
----------
Returns
-------
gpd.GeoDataFrame
Polygons created from exterior rings of the mesh hull | ocsmesh/mesh/mesh.py | exterior | noaa-ocs-modeling/OCSMesh | python | def exterior(self) -> gpd.GeoDataFrame:
'Creates polygons from exterior rings of the mesh hull\n\n Parameters\n ----------\n\n Returns\n -------\n gpd.GeoDataFrame\n Polygons created from exterior rings of the mesh hull\n '
data = []
for exterior in self.rings().loc[(self.rings()['type'] == 'exterior')].itertuples():
data.append({'geometry': Polygon(exterior.geometry.coords)})
return gpd.GeoDataFrame(data, crs=self.mesh.crs) |
def interior(self) -> gpd.GeoDataFrame:
'Creates polygons from interior rings of the mesh hull\n\n Parameters\n ----------\n\n Returns\n -------\n gpd.GeoDataFrame\n Polygons created from interior rings of the mesh hull\n '
data = []
for interior in self.rings().loc[(self.rings()['type'] == 'interior')].itertuples():
data.append({'geometry': Polygon(interior.geometry.coords)})
return gpd.GeoDataFrame(data, crs=self.mesh.crs) | 4,092,416,981,451,248,600 | Creates polygons from interior rings of the mesh hull
Parameters
----------
Returns
-------
gpd.GeoDataFrame
Polygons created from interior rings of the mesh hull | ocsmesh/mesh/mesh.py | interior | noaa-ocs-modeling/OCSMesh | python | def interior(self) -> gpd.GeoDataFrame:
'Creates polygons from interior rings of the mesh hull\n\n Parameters\n ----------\n\n Returns\n -------\n gpd.GeoDataFrame\n Polygons created from interior rings of the mesh hull\n '
data = []
for interior in self.rings().loc[(self.rings()['type'] == 'interior')].itertuples():
data.append({'geometry': Polygon(interior.geometry.coords)})
return gpd.GeoDataFrame(data, crs=self.mesh.crs) |
def implode(self) -> gpd.GeoDataFrame:
'Creates a dataframe from mesh polygons.\n\n Parameters\n ----------\n\n Returns\n ------\n gpd.GeoDataFrame\n Dataframe containing polygons of the mesh.\n\n See Also\n --------\n __call__()\n Dataframe with multiple polygon and boundary ID entries\n of the mesh polygons.\n multipolygon()\n `shapely` multipolygon shape of combined mesh polygons.\n\n Notes\n -----\n The difference of the return value of this method and\n `__call__` is that the `implode` returns a dataframe with\n a single `MultiPolygon` where as `__call__` returns a\n dataframe with multiple `Polygon` entries with associated\n `bnd_id`.\n '
return gpd.GeoDataFrame({'geometry': MultiPolygon([polygon.geometry for polygon in self().itertuples()])}, crs=self.mesh.crs) | -1,110,060,583,713,869,800 | Creates a dataframe from mesh polygons.
Parameters
----------
Returns
------
gpd.GeoDataFrame
Dataframe containing polygons of the mesh.
See Also
--------
__call__()
Dataframe with multiple polygon and boundary ID entries
of the mesh polygons.
multipolygon()
`shapely` multipolygon shape of combined mesh polygons.
Notes
-----
The difference between the return value of this method and
`__call__` is that `implode` returns a dataframe with
a single `MultiPolygon`, whereas `__call__` returns a
dataframe with multiple `Polygon` entries and their associated
`bnd_id`. | ocsmesh/mesh/mesh.py | implode | noaa-ocs-modeling/OCSMesh | python | def implode(self) -> gpd.GeoDataFrame:
'Creates a dataframe from mesh polygons.\n\n Parameters\n ----------\n\n Returns\n ------\n gpd.GeoDataFrame\n Dataframe containing polygons of the mesh.\n\n See Also\n --------\n __call__()\n Dataframe with multiple polygon and boundary ID entries\n of the mesh polygons.\n multipolygon()\n `shapely` multipolygon shape of combined mesh polygons.\n\n Notes\n -----\n The difference of the return value of this method and\n `__call__` is that the `implode` returns a dataframe with\n a single `MultiPolygon` where as `__call__` returns a\n dataframe with multiple `Polygon` entries with associated\n `bnd_id`.\n '
return gpd.GeoDataFrame({'geometry': MultiPolygon([polygon.geometry for polygon in self().itertuples()])}, crs=self.mesh.crs) |
def multipolygon(self) -> MultiPolygon:
'Returns mesh multi-polygons.\n\n Parameters\n ----------\n\n Returns\n ------\n MultiPolygon\n Combined shape of polygons of the mesh.\n\n See Also\n --------\n __call__()\n Dataframe with multiple polygon and boundary ID entries\n of the mesh polygons.\n implode()\n Dataframe with a single combined multipolygon of the mesh\n polygons.\n\n Notes\n -----\n The difference of the return value of this method and `implode`\n is that `multipolygon` returns a `MultiPolygon` object where\n as `implode` returns a dataframe warpping the multipolygon\n object.\n '
mp = self.implode().iloc[0].geometry
if isinstance(mp, Polygon):
mp = MultiPolygon([mp])
return mp | -4,280,634,786,239,775,000 | Returns mesh multi-polygons.
Parameters
----------
Returns
------
MultiPolygon
Combined shape of polygons of the mesh.
See Also
--------
__call__()
Dataframe with multiple polygon and boundary ID entries
of the mesh polygons.
implode()
Dataframe with a single combined multipolygon of the mesh
polygons.
Notes
-----
The difference between the return value of this method and `implode`
is that `multipolygon` returns a `MultiPolygon` object, whereas
`implode` returns a dataframe wrapping the multipolygon
object. | ocsmesh/mesh/mesh.py | multipolygon | noaa-ocs-modeling/OCSMesh | python | def multipolygon(self) -> MultiPolygon:
'Returns mesh multi-polygons.\n\n Parameters\n ----------\n\n Returns\n ------\n MultiPolygon\n Combined shape of polygons of the mesh.\n\n See Also\n --------\n __call__()\n Dataframe with multiple polygon and boundary ID entries\n of the mesh polygons.\n implode()\n Dataframe with a single combined multipolygon of the mesh\n polygons.\n\n Notes\n -----\n The difference of the return value of this method and `implode`\n is that `multipolygon` returns a `MultiPolygon` object where\n as `implode` returns a dataframe warpping the multipolygon\n object.\n '
mp = self.implode().iloc[0].geometry
if isinstance(mp, Polygon):
mp = MultiPolygon([mp])
return mp |
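The promotion of a lone `Polygon` to a single-member `MultiPolygon` performed in the record above can be tried in isolation:

from shapely.geometry import MultiPolygon, Polygon

geom = Polygon([(0, 0), (1, 0), (1, 1)])
if isinstance(geom, Polygon):    # same guard as in multipolygon() above
    geom = MultiPolygon([geom])
print(len(geom.geoms))           # 1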
def triangulation(self) -> Triangulation:
'Create triangulation object from all the mesh elements.\n\n Parameters\n ----------\n\n Returns\n -------\n Triangulation\n The `matplotlib` triangulation object create from all\n the elements of the parent mesh.\n\n Notes\n -----\n Currently only tria3 and quad4 elements are considered.\n '
triangles = self.mesh.msh_t.tria3['index'].tolist()
for quad in self.mesh.msh_t.quad4['index']:
triangles.extend([[quad[0], quad[1], quad[3]], [quad[1], quad[2], quad[3]]])
return Triangulation(self.mesh.coord[:, 0], self.mesh.coord[:, 1], triangles) | -3,367,212,795,462,451,700 | Create triangulation object from all the mesh elements.
Parameters
----------
Returns
-------
Triangulation
The `matplotlib` triangulation object created from all
the elements of the parent mesh.
Notes
-----
Currently only tria3 and quad4 elements are considered. | ocsmesh/mesh/mesh.py | triangulation | noaa-ocs-modeling/OCSMesh | python | def triangulation(self) -> Triangulation:
'Create triangulation object from all the mesh elements.\n\n Parameters\n ----------\n\n Returns\n -------\n Triangulation\n The `matplotlib` triangulation object create from all\n the elements of the parent mesh.\n\n Notes\n -----\n Currently only tria3 and quad4 elements are considered.\n '
triangles = self.mesh.msh_t.tria3['index'].tolist()
for quad in self.mesh.msh_t.quad4['index']:
triangles.extend([[quad[0], quad[1], quad[3]], [quad[1], quad[2], quad[3]]])
return Triangulation(self.mesh.coord[:, 0], self.mesh.coord[:, 1], triangles) |
def __init__(self, mesh: EuclideanMesh) -> None:
'Initializes node handler helper object.\n\n Parameters\n ----------\n mesh : EuclideanMesh\n Input mesh for which this object handles nodes info.\n '
self.mesh = mesh
self._id_to_index = None
self._index_to_id = None | 7,583,967,430,143,226,000 | Initializes node handler helper object.
Parameters
----------
mesh : EuclideanMesh
Input mesh for which this object handles nodes info. | ocsmesh/mesh/mesh.py | __init__ | noaa-ocs-modeling/OCSMesh | python | def __init__(self, mesh: EuclideanMesh) -> None:
'Initializes node handler helper object.\n\n Parameters\n ----------\n mesh : EuclideanMesh\n Input mesh for which this object handles nodes info.\n '
self.mesh = mesh
self._id_to_index = None
self._index_to_id = None |
@lru_cache(maxsize=1)
def __call__(self) -> Dict[(int, int)]:
"Creates a mapping between node IDs and indexes.\n\n Parameters\n ----------\n\n Returns\n -------\n dict\n Mapping between node IDs and indexes.\n\n Notes\n -----\n The result of this method is cached, so that multiple calls\n to it won't result in multiple calculations. If the mesh\n is modified and the cache is not properly clear the calls\n to this method can result in invalid return values.\n "
return {(i + 1): coord for (i, coord) in enumerate(self.coords())} | 971,225,677,000,335,900 | Creates a mapping between node IDs and coordinates.
Parameters
----------
Returns
-------
dict
Mapping between node IDs and their coordinates.
Notes
-----
The result of this method is cached, so that multiple calls
to it won't result in multiple calculations. If the mesh
is modified and the cache is not properly cleared, the calls
to this method can result in invalid return values. | ocsmesh/mesh/mesh.py | __call__ | noaa-ocs-modeling/OCSMesh | python | @lru_cache(maxsize=1)
def __call__(self) -> Dict[(int, int)]:
"Creates a mapping between node IDs and indexes.\n\n Parameters\n ----------\n\n Returns\n -------\n dict\n Mapping between node IDs and indexes.\n\n Notes\n -----\n The result of this method is cached, so that multiple calls\n to it won't result in multiple calculations. If the mesh\n is modified and the cache is not properly clear the calls\n to this method can result in invalid return values.\n "
return {(i + 1): coord for (i, coord) in enumerate(self.coords())} |
def id(self) -> List[int]:
'Retrieves a list of node IDs.\n\n        Parameters\n        ----------\n\n        Returns\n        -------\n        list of int\n            List of node IDs as created by `__call__`\n        '
return list(self().keys()) | -5,521,052,617,407,928,000 | Retrieves a list of node IDs.
Parameters
----------
Returns
-------
list of int
List of node IDs as created by `__call__` | ocsmesh/mesh/mesh.py | id | noaa-ocs-modeling/OCSMesh | python | def id(self) -> List[int]:
'Retrieves a list of node IDs.\n\n        Parameters\n        ----------\n\n        Returns\n        -------\n        list of int\n            List of node IDs as created by `__call__`\n        '
return list(self().keys()) |
def index(self) -> npt.NDArray[int]:
'Retrieves an array of node indexes.\n\n        Parameters\n        ----------\n\n        Returns\n        -------\n        array-like\n            Array of node indexes.\n        '
return np.arange(len(self())) | 6,618,088,399,711,052,000 | Retrieves an array of node indexes.
Parameters
----------
Returns
-------
array-like
Array of node indexes. | ocsmesh/mesh/mesh.py | index | noaa-ocs-modeling/OCSMesh | python | def index(self) -> npt.NDArray[int]:
'Retrieves an array of node indexes.\n\n        Parameters\n        ----------\n\n        Returns\n        -------\n        array-like\n            Array of node indexes.\n        '
return np.arange(len(self())) |
def coords(self) -> npt.NDArray[np.float32]:
'Retrieve the coordinates of mesh nodes\n\n Parameters\n ----------\n\n Returns\n -------\n array-like\n Coordinates of the mesh nodes as returned by `BaseMesh.coord`\n '
return self.mesh.coord | -5,574,595,264,128,667,000 | Retrieve the coordinates of mesh nodes
Parameters
----------
Returns
-------
array-like
Coordinates of the mesh nodes as returned by `BaseMesh.coord` | ocsmesh/mesh/mesh.py | coords | noaa-ocs-modeling/OCSMesh | python | def coords(self) -> npt.NDArray[np.float32]:
'Retrieve the coordinates of mesh nodes\n\n Parameters\n ----------\n\n Returns\n -------\n array-like\n Coordinates of the mesh nodes as returned by `BaseMesh.coord`\n '
return self.mesh.coord |
def values(self):
'Retrieve the values stored for mesh nodes\n\n Parameters\n ----------\n\n Returns\n -------\n array-like\n Values on the mesh nodes as returned by `BaseMesh.values`\n '
return self.mesh.values | 4,970,250,622,496,955,000 | Retrieve the values stored for mesh nodes
Parameters
----------
Returns
-------
array-like
Values on the mesh nodes as returned by `BaseMesh.values` | ocsmesh/mesh/mesh.py | values | noaa-ocs-modeling/OCSMesh | python | def values(self):
'Retrieve the values stored for mesh nodes\n\n Parameters\n ----------\n\n Returns\n -------\n array-like\n Values on the mesh nodes as returned by `BaseMesh.values`\n '
return self.mesh.values |
def get_index_by_id(self, node_id):
'Converts mesh ID to mesh index.\n\n Parameters\n ----------\n node_id : int\n ID of the node of interest\n\n Returns\n -------\n int\n Index of the node of interest\n '
return self.id_to_index[node_id] | -1,355,441,475,551,445,200 | Converts mesh ID to mesh index.
Parameters
----------
node_id : int
ID of the node of interest
Returns
-------
int
Index of the node of interest | ocsmesh/mesh/mesh.py | get_index_by_id | noaa-ocs-modeling/OCSMesh | python | def get_index_by_id(self, node_id):
'Converts mesh ID to mesh index.\n\n Parameters\n ----------\n node_id : int\n ID of the node of interest\n\n Returns\n -------\n int\n Index of the node of interest\n '
return self.id_to_index[node_id] |
def get_id_by_index(self, index: int):
'Converts mesh index to mesh ID.\n\n Parameters\n ----------\n index : int\n Index of the node of interest.\n\n Returns\n -------\n int\n ID of the node of interest\n '
return self.index_to_id[index] | 8,713,344,903,840,962,000 | Converts mesh index to mesh ID.
Parameters
----------
index : int
Index of the node of interest.
Returns
-------
int
ID of the node of interest | ocsmesh/mesh/mesh.py | get_id_by_index | noaa-ocs-modeling/OCSMesh | python | def get_id_by_index(self, index: int):
'Converts mesh index to mesh ID.\n\n Parameters\n ----------\n index : int\n Index of the node of interest.\n\n Returns\n -------\n int\n ID of the node of interest\n '
return self.index_to_id[index] |
@property
def id_to_index(self) -> Dict[(int, int)]:
'Read-only property returning the mapping of ID to index\n\n Notes\n -----\n Although the property is read-only, the return value object\n is a cached mutable dictionary object. Modifying the mesh\n without clearing the cache properly or mutating the\n returned object could result in undefined behavior\n '
if (self._id_to_index is None):
self._id_to_index = {node_id: index for (index, node_id) in enumerate(self().keys())}
return self._id_to_index | -5,844,648,040,583,783,000 | Read-only property returning the mapping of ID to index
Notes
-----
Although the property is read-only, the return value object
is a cached mutable dictionary object. Modifying the mesh
without clearing the cache properly or mutating the
returned object could result in undefined behavior | ocsmesh/mesh/mesh.py | id_to_index | noaa-ocs-modeling/OCSMesh | python | @property
def id_to_index(self) -> Dict[(int, int)]:
'Read-only property returning the mapping of ID to index\n\n Notes\n -----\n Although the property is read-only, the return value object\n is a cached mutable dictionary object. Modifying the mesh\n without clearing the cache properly or mutating the\n returned object could result in undefined behavior\n '
if (self._id_to_index is None):
self._id_to_index = {node_id: index for (index, node_id) in enumerate(self().keys())}
return self._id_to_index |
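A toy version of the two lookup tables kept by this helper: node IDs are 1-based while indexes are 0-based positions into the coordinate array, as the code above implies.

node_ids = [1, 2, 3]                                      # IDs as produced by Nodes.__call__
id_to_index = {nid: i for i, nid in enumerate(node_ids)}
index_to_id = dict(enumerate(node_ids))
assert id_to_index[3] == 2 and index_to_id[2] == 3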
@property
def index_to_id(self) -> Dict[(int, int)]:
'Read-only property returning the mapping of index to ID\n\n Notes\n -----\n Although the property is read-only, the return value object\n is a cached mutable dictionary object. Modifying the mesh\n without clearing the cache properly or mutating the\n returned object could result in undefined behavior\n '
if (self._index_to_id is None):
self._index_to_id = dict(enumerate(self().keys()))
return self._index_to_id | 8,368,307,017,401,396,000 | Read-only property returning the mapping of index to ID
Notes
-----
Although the property is read-only, the return value object
is a cached mutable dictionary object. Modifying the mesh
without clearing the cache properly or mutating the
returned object could result in undefined behavior | ocsmesh/mesh/mesh.py | index_to_id | noaa-ocs-modeling/OCSMesh | python | @property
def index_to_id(self) -> Dict[(int, int)]:
'Read-only property returning the mapping of index to ID\n\n Notes\n -----\n Although the property is read-only, the return value object\n is a cached mutable dictionary object. Modifying the mesh\n without clearing the cache properly or mutating the\n returned object could result in undefined behavior\n '
if (self._index_to_id is None):
self._index_to_id = dict(enumerate(self().keys()))
return self._index_to_id |
def __init__(self, mesh: EuclideanMesh) -> None:
'Initialize the element handler helper object.\n\n Parameters\n ----------\n mesh : EuclideanMesh\n Input mesh for which this object handles elements info.\n '
self.mesh = mesh | 3,369,410,356,158,437,400 | Initialize the element handler helper object.
Parameters
----------
mesh : EuclideanMesh
Input mesh for which this object handles elements info. | ocsmesh/mesh/mesh.py | __init__ | noaa-ocs-modeling/OCSMesh | python | def __init__(self, mesh: EuclideanMesh) -> None:
'Initialize the element handler helper object.\n\n Parameters\n ----------\n mesh : EuclideanMesh\n Input mesh for which this object handles elements info.\n '
self.mesh = mesh |
@lru_cache(maxsize=1)
def __call__(self) -> Dict[(int, npt.NDArray[int])]:
"Creates a mapping between element IDs and associated node IDs.\n\n Parameters\n ----------\n\n Returns\n -------\n dict\n Mapping between element IDs and associated node Ids\n\n Notes\n -----\n The result of this method is cached, so that multiple calls\n to it won't result in multiple calculations. If the mesh\n is modified and the cache is not properly clear the calls\n to this method can result in invalid return values.\n "
elements = {(i + 1): (index + 1) for (i, index) in enumerate(self.mesh.msh_t.tria3['index'])}
elements.update({((i + len(elements)) + 1): (index + 1) for (i, index) in enumerate(self.mesh.msh_t.quad4['index'])})
return elements | 5,356,765,634,915,342,000 | Creates a mapping between element IDs and associated node IDs.
Parameters
----------
Returns
-------
dict
Mapping between element IDs and associated node Ids
Notes
-----
The result of this method is cached, so that multiple calls
to it won't result in multiple calculations. If the mesh
is modified and the cache is not properly cleared, the calls
to this method can result in invalid return values. | ocsmesh/mesh/mesh.py | __call__ | noaa-ocs-modeling/OCSMesh | python | @lru_cache(maxsize=1)
def __call__(self) -> Dict[(int, npt.NDArray[int])]:
"Creates a mapping between element IDs and associated node IDs.\n\n Parameters\n ----------\n\n Returns\n -------\n dict\n Mapping between element IDs and associated node Ids\n\n Notes\n -----\n The result of this method is cached, so that multiple calls\n to it won't result in multiple calculations. If the mesh\n is modified and the cache is not properly clear the calls\n to this method can result in invalid return values.\n "
elements = {(i + 1): (index + 1) for (i, index) in enumerate(self.mesh.msh_t.tria3['index'])}
elements.update({((i + len(elements)) + 1): (index + 1) for (i, index) in enumerate(self.mesh.msh_t.quad4['index'])})
return elements |
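A toy illustration of the bookkeeping in the record above: `msh_t` stores 0-based node indexes, while the exported element and node IDs are 1-based.

import numpy as np

tria3_index = np.array([[0, 1, 2], [1, 3, 2]])                 # two triangles, 0-based node indexes
elements = {i + 1: index + 1 for i, index in enumerate(tria3_index)}
print(elements[1])                                             # [1 2 3] -- 1-based node IDs of element 1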
@lru_cache(maxsize=1)
def id(self) -> List[int]:
"Retrieves the list of element IDs as returned by `__call__`\n\n Parameters\n ----------\n\n Returns\n -------\n list of int\n List of element IDs.\n\n Notes\n -----\n The result of this method is cached, so that multiple calls\n to it won't result in multiple calculations. If the mesh\n is modified and the cache is not properly clear the calls\n to this method can result in invalid return values.\n "
return list(self().keys()) | 1,294,986,368,255,677,400 | Retrieves the list of element IDs as returned by `__call__`
Parameters
----------
Returns
-------
list of int
List of element IDs.
Notes
-----
The result of this method is cached, so that multiple calls
to it won't result in multiple calculations. If the mesh
is modified and the cache is not properly cleared, the calls
to this method can result in invalid return values. | ocsmesh/mesh/mesh.py | id | noaa-ocs-modeling/OCSMesh | python | @lru_cache(maxsize=1)
def id(self) -> List[int]:
"Retrieves the list of element IDs as returned by `__call__`\n\n Parameters\n ----------\n\n Returns\n -------\n list of int\n List of element IDs.\n\n Notes\n -----\n The result of this method is cached, so that multiple calls\n to it won't result in multiple calculations. If the mesh\n is modified and the cache is not properly clear the calls\n to this method can result in invalid return values.\n "
return list(self().keys()) |
@lru_cache(maxsize=1)
def index(self) -> npt.NDArray[int]:
"Retrieves an array of element indices\n\n Parameters\n ----------\n\n Returns\n -------\n npt.NDArray\n 1D array of element indices.\n\n Notes\n -----\n The result of this method is cached, so that multiple calls\n to it won't result in multiple calculations. If the mesh\n is modified and the cache is not properly clear the calls\n to this method can result in invalid return values.\n "
return np.arange(len(self())) | 6,401,692,417,557,761,000 | Retrieves an array of element indices
Parameters
----------
Returns
-------
npt.NDArray
1D array of element indices.
Notes
-----
The result of this method is cached, so that multiple calls
to it won't result in multiple calculations. If the mesh
is modified and the cache is not properly cleared, the calls
to this method can result in invalid return values. | ocsmesh/mesh/mesh.py | index | noaa-ocs-modeling/OCSMesh | python | @lru_cache(maxsize=1)
def index(self) -> npt.NDArray[int]:
"Retrieves an array of element indices\n\n Parameters\n ----------\n\n Returns\n -------\n npt.NDArray\n 1D array of element indices.\n\n Notes\n -----\n The result of this method is cached, so that multiple calls\n to it won't result in multiple calculations. If the mesh\n is modified and the cache is not properly clear the calls\n to this method can result in invalid return values.\n "
return np.arange(len(self())) |
def array(self) -> npt.NDArray[int]:
"Retrieves a masked array of element node IDs.\n\n The return value is ``n x m`` where ``n`` is the number of\n elements and ``m`` is the maximum number of element nodes, e.g.\n if there are only trias, then it's 3, for trias and quads it\n is 4.\n\n Parameters\n ----------\n\n Returns\n -------\n npt.NDArray\n Masked array where elements with fewer associated nodes\n have trailing masked node columns in the array.\n "
rank = int(max(map(len, self().values())))
array = np.full((len(self()), rank), (- 1))
for (i, elem_nd_ids) in enumerate(self().values()):
row = np.array(list(map(self.mesh.nodes.get_index_by_id, elem_nd_ids)))
array[i, :len(row)] = row
return np.ma.masked_equal(array, (- 1)) | 7,271,535,590,505,444,000 | Retrieves a masked array of element node IDs.
The return value is ``n x m`` where ``n`` is the number of
elements and ``m`` is the maximum number of nodes per element,
e.g. 3 if the mesh has only trias, or 4 if it has both trias
and quads.
Parameters
----------
Returns
-------
npt.NDArray
Masked array where elements with fewer associated nodes
have trailing masked node columns in the array. | ocsmesh/mesh/mesh.py | array | noaa-ocs-modeling/OCSMesh | python | def array(self) -> npt.NDArray[int]:
"Retrieves a masked array of element node IDs.\n\n The return value is ``n x m`` where ``n`` is the number of\n elements and ``m`` is the maximum number of element nodes, e.g.\n if there are only trias, then it's 3, for trias and quads it\n is 4.\n\n Parameters\n ----------\n\n Returns\n -------\n npt.NDArray\n Masked array where elements with fewer associated nodes\n have trailing masked node columns in the array.\n "
rank = int(max(map(len, self().values())))
array = np.full((len(self()), rank), (- 1))
for (i, elem_nd_ids) in enumerate(self().values()):
row = np.array(list(map(self.mesh.nodes.get_index_by_id, elem_nd_ids)))
array[i, :len(row)] = row
return np.ma.masked_equal(array, (- 1)) |
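The pad-and-mask scheme used in the record above, shown on a toy mixed tria/quad table:

import numpy as np

table = np.array([[0, 1, 2, -1],     # a tria padded out to rank 4
                  [3, 4, 5, 6]])     # a quad
masked = np.ma.masked_equal(table, -1)
print(int(masked[0].count()), int(masked[1].count()))   # 3 4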
@lru_cache(maxsize=1)
def triangles(self) -> npt.NDArray[int]:
"Retrieves an array of tria element node indices\n\n Parameters\n ----------\n\n Returns\n -------\n npt.NDArray\n 2D array of element nodes for triangle nodes\n\n Notes\n -----\n The result of this method is cached, so that multiple calls\n to it won't result in multiple calculations. If the mesh\n is modified and the cache is not properly clear the calls\n to this method can result in invalid return values.\n "
return np.array([list(map(self.mesh.nodes.get_index_by_id, element)) for element in self().values() if (len(element) == 3)]) | -5,246,321,069,616,125,000 | Retrieves an array of tria element node indices
Parameters
----------
Returns
-------
npt.NDArray
2D array of element nodes for triangle nodes
Notes
-----
The result of this method is cached, so that multiple calls
to it won't result in multiple calculations. If the mesh
is modified and the cache is not properly cleared, the calls
to this method can result in invalid return values. | ocsmesh/mesh/mesh.py | triangles | noaa-ocs-modeling/OCSMesh | python | @lru_cache(maxsize=1)
def triangles(self) -> npt.NDArray[int]:
"Retrieves an array of tria element node indices\n\n Parameters\n ----------\n\n Returns\n -------\n npt.NDArray\n 2D array of element nodes for triangle nodes\n\n Notes\n -----\n The result of this method is cached, so that multiple calls\n to it won't result in multiple calculations. If the mesh\n is modified and the cache is not properly clear the calls\n to this method can result in invalid return values.\n "
return np.array([list(map(self.mesh.nodes.get_index_by_id, element)) for element in self().values() if (len(element) == 3)]) |
@lru_cache(maxsize=1)
def quads(self):
"Retrieves an array of quad element node indices\n\n Parameters\n ----------\n\n Returns\n -------\n npt.NDArray\n 2D array of element nodes for quadrangle nodes\n\n Notes\n -----\n The result of this method is cached, so that multiple calls\n to it won't result in multiple calculations. If the mesh\n is modified and the cache is not properly clear the calls\n to this method can result in invalid return values.\n "
return np.array([list(map(self.mesh.nodes.get_index_by_id, element)) for element in self().values() if (len(element) == 4)]) | -7,106,942,006,327,528,000 | Retrieves an array of quad element node indices
Parameters
----------
Returns
-------
npt.NDArray
2D array of element nodes for quadrangle nodes
Notes
-----
The result of this method is cached, so that multiple calls
to it won't result in multiple calculations. If the mesh
is modified and the cache is not properly cleared, the calls
to this method can result in invalid return values. | ocsmesh/mesh/mesh.py | quads | noaa-ocs-modeling/OCSMesh | python | @lru_cache(maxsize=1)
def quads(self):
"Retrieves an array of quad element node indices\n\n Parameters\n ----------\n\n Returns\n -------\n npt.NDArray\n 2D array of element nodes for quadrangle nodes\n\n Notes\n -----\n The result of this method is cached, so that multiple calls\n to it won't result in multiple calculations. If the mesh\n is modified and the cache is not properly clear the calls\n to this method can result in invalid return values.\n "
return np.array([list(map(self.mesh.nodes.get_index_by_id, element)) for element in self().values() if (len(element) == 4)]) |
def triangulation(self) -> Triangulation:
'Create triangulation object from all the mesh elements.\n\n Parameters\n ----------\n\n Returns\n -------\n Triangulation\n The `matplotlib` triangulation object create from all\n the elements of the parent mesh.\n\n Notes\n -----\n Currently only tria3 and quad4 elements are considered.\n '
triangles = self.triangles().tolist()
for quad in self.quads():
triangles.append([quad[0], quad[1], quad[3]])
triangles.append([quad[1], quad[2], quad[3]])
return Triangulation(self.mesh.coord[:, 0], self.mesh.coord[:, 1], triangles) | -1,068,129,071,886,108,900 | Create triangulation object from all the mesh elements.
Parameters
----------
Returns
-------
Triangulation
The `matplotlib` triangulation object created from all
the elements of the parent mesh.
Notes
-----
Currently only tria3 and quad4 elements are considered. | ocsmesh/mesh/mesh.py | triangulation | noaa-ocs-modeling/OCSMesh | python | def triangulation(self) -> Triangulation:
'Create triangulation object from all the mesh elements.\n\n Parameters\n ----------\n\n Returns\n -------\n Triangulation\n The `matplotlib` triangulation object create from all\n the elements of the parent mesh.\n\n Notes\n -----\n Currently only tria3 and quad4 elements are considered.\n '
triangles = self.triangles().tolist()
for quad in self.quads():
triangles.append([quad[0], quad[1], quad[3]])
triangles.append([quad[1], quad[2], quad[3]])
return Triangulation(self.mesh.coord[:, 0], self.mesh.coord[:, 1], triangles) |
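A self-contained check of the quad-splitting rule used by both `triangulation` records: each quad [0, 1, 2, 3] becomes the trias (0, 1, 3) and (1, 2, 3).

import numpy as np
from matplotlib.tri import Triangulation

x = np.array([0.0, 1.0, 1.0, 0.0])
y = np.array([0.0, 0.0, 1.0, 1.0])
quad = [0, 1, 2, 3]
triangles = [[quad[0], quad[1], quad[3]], [quad[1], quad[2], quad[3]]]
tri = Triangulation(x, y, triangles)
print(tri.triangles.shape)    # (2, 3)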
def geodataframe(self) -> gpd.GeoDataFrame:
'Create polygons for each element and return in dataframe\n\n Parameters\n ----------\n\n Returns\n -------\n gpd.GeoDataFrame\n Dataframe created from entries of `Polygon` type for\n each element.\n '
data = []
for (elem_id, elem_nd_ids) in self().items():
data.append({'geometry': Polygon(self.mesh.coord[list(map(self.mesh.nodes.get_index_by_id, elem_nd_ids))]), 'id': elem_id})
return gpd.GeoDataFrame(data, crs=self.mesh.crs) | -1,141,472,162,654,111,200 | Create polygons for each element and return in dataframe
Parameters
----------
Returns
-------
gpd.GeoDataFrame
Dataframe created from entries of `Polygon` type for
each element. | ocsmesh/mesh/mesh.py | geodataframe | noaa-ocs-modeling/OCSMesh | python | def geodataframe(self) -> gpd.GeoDataFrame:
'Create polygons for each element and return in dataframe\n\n Parameters\n ----------\n\n Returns\n -------\n gpd.GeoDataFrame\n Dataframe created from entries of `Polygon` type for\n each element.\n '
data = []
for (elem_id, elem_nd_ids) in self().items():
data.append({'geometry': Polygon(self.mesh.coord[list(map(self.mesh.nodes.get_index_by_id, elem_nd_ids))]), 'id': elem_id})
return gpd.GeoDataFrame(data, crs=self.mesh.crs) |
def __init__(self, mesh: EuclideanMesh) -> None:
'Initialize boundary helper object\n\n Parameters\n ----------\n mesh : EuclideanMesh\n Input mesh for which this object calculates boundaries.\n '
self.mesh = mesh
self._ocean = gpd.GeoDataFrame()
self._land = gpd.GeoDataFrame()
self._interior = gpd.GeoDataFrame()
self._data = defaultdict(defaultdict) | 6,331,604,948,413,647,000 | Initialize boundary helper object
Parameters
----------
mesh : EuclideanMesh
Input mesh for which this object calculates boundaries. | ocsmesh/mesh/mesh.py | __init__ | noaa-ocs-modeling/OCSMesh | python | def __init__(self, mesh: EuclideanMesh) -> None:
'Initialize boundary helper object\n\n Parameters\n ----------\n mesh : EuclideanMesh\n Input mesh for which this object calculates boundaries.\n '
self.mesh = mesh
self._ocean = gpd.GeoDataFrame()
self._land = gpd.GeoDataFrame()
self._interior = gpd.GeoDataFrame()
self._data = defaultdict(defaultdict) |
@lru_cache(maxsize=1)
def _init_dataframes(self) -> None:
"Internal: Creates boundary dataframes based on boundary data\n\n Parameters\n ----------\n\n Returns\n -------\n None\n\n Notes\n -----\n This method doesn't have any return value, but it is cached\n so that on re-execution it doesn't recalculate.\n "
boundaries = self._data
ocean_boundaries = []
land_boundaries = []
interior_boundaries = []
if (boundaries is not None):
for (ibtype, bnds) in boundaries.items():
if (ibtype is None):
for (bnd_id, data) in bnds.items():
indexes = list(map(self.mesh.nodes.get_index_by_id, data['indexes']))
ocean_boundaries.append({'id': bnd_id, 'index_id': data['indexes'], 'indexes': indexes, 'geometry': LineString(self.mesh.coord[indexes])})
elif str(ibtype).endswith('1'):
for (bnd_id, data) in bnds.items():
indexes = list(map(self.mesh.nodes.get_index_by_id, data['indexes']))
interior_boundaries.append({'id': bnd_id, 'ibtype': ibtype, 'index_id': data['indexes'], 'indexes': indexes, 'geometry': LineString(self.mesh.coord[indexes])})
else:
for (bnd_id, data) in bnds.items():
_indexes = np.array(data['indexes'])
if (_indexes.ndim > 1):
new_indexes = []
for (i, line) in enumerate(_indexes.T):
if ((i % 2) != 0):
new_indexes.extend(np.flip(line))
else:
new_indexes.extend(line)
_indexes = np.array(new_indexes).flatten()
else:
_indexes = _indexes.flatten()
indexes = list(map(self.mesh.nodes.get_index_by_id, _indexes))
land_boundaries.append({'id': bnd_id, 'ibtype': ibtype, 'index_id': data['indexes'], 'indexes': indexes, 'geometry': LineString(self.mesh.coord[indexes])})
self._ocean = gpd.GeoDataFrame(ocean_boundaries)
self._land = gpd.GeoDataFrame(land_boundaries)
self._interior = gpd.GeoDataFrame(interior_boundaries) | -1,658,431,923,495,654,700 | Internal: Creates boundary dataframes based on boundary data
Parameters
----------
Returns
-------
None
Notes
-----
This method doesn't have any return value, but it is cached
so that on re-execution it doesn't recalculate. | ocsmesh/mesh/mesh.py | _init_dataframes | noaa-ocs-modeling/OCSMesh | python | @lru_cache(maxsize=1)
def _init_dataframes(self) -> None:
"Internal: Creates boundary dataframes based on boundary data\n\n Parameters\n ----------\n\n Returns\n -------\n None\n\n Notes\n -----\n This method doesn't have any return value, but it is cached\n so that on re-execution it doesn't recalculate.\n "
boundaries = self._data
ocean_boundaries = []
land_boundaries = []
interior_boundaries = []
if (boundaries is not None):
for (ibtype, bnds) in boundaries.items():
if (ibtype is None):
for (bnd_id, data) in bnds.items():
indexes = list(map(self.mesh.nodes.get_index_by_id, data['indexes']))
ocean_boundaries.append({'id': bnd_id, 'index_id': data['indexes'], 'indexes': indexes, 'geometry': LineString(self.mesh.coord[indexes])})
elif str(ibtype).endswith('1'):
for (bnd_id, data) in bnds.items():
indexes = list(map(self.mesh.nodes.get_index_by_id, data['indexes']))
interior_boundaries.append({'id': bnd_id, 'ibtype': ibtype, 'index_id': data['indexes'], 'indexes': indexes, 'geometry': LineString(self.mesh.coord[indexes])})
else:
for (bnd_id, data) in bnds.items():
_indexes = np.array(data['indexes'])
if (_indexes.ndim > 1):
new_indexes = []
for (i, line) in enumerate(_indexes.T):
if ((i % 2) != 0):
new_indexes.extend(np.flip(line))
else:
new_indexes.extend(line)
_indexes = np.array(new_indexes).flatten()
else:
_indexes = _indexes.flatten()
indexes = list(map(self.mesh.nodes.get_index_by_id, _indexes))
land_boundaries.append({'id': bnd_id, 'ibtype': ibtype, 'index_id': data['indexes'], 'indexes': indexes, 'geometry': LineString(self.mesh.coord[indexes])})
self._ocean = gpd.GeoDataFrame(ocean_boundaries)
self._land = gpd.GeoDataFrame(land_boundaries)
self._interior = gpd.GeoDataFrame(interior_boundaries) |
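The least obvious step in the record above is the handling of two-column (paired) land-boundary indexes: every other column is reversed before flattening so the node order runs continuously along the boundary. A toy version:

import numpy as np

_indexes = np.array([[1, 6], [2, 5], [3, 4]])    # paired boundary nodes, one pair per row
new_indexes = []
for i, line in enumerate(_indexes.T):
    new_indexes.extend(np.flip(line) if i % 2 != 0 else line)
print([int(v) for v in new_indexes])             # [1, 2, 3, 4, 5, 6]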
def ocean(self) -> gpd.GeoDataFrame:
'Retrieve the ocean boundary information dataframe\n\n Parameters\n ----------\n\n Returns\n -------\n gpd.GeoDataFrame\n Dataframe containing the geometry and information of\n ocean open boundary.\n '
self._init_dataframes()
return self._ocean | -2,385,565,405,689,421,000 | Retrieve the ocean boundary information dataframe
Parameters
----------
Returns
-------
gpd.GeoDataFrame
Dataframe containing the geometry and information of
ocean open boundary. | ocsmesh/mesh/mesh.py | ocean | noaa-ocs-modeling/OCSMesh | python | def ocean(self) -> gpd.GeoDataFrame:
'Retrieve the ocean boundary information dataframe\n\n Parameters\n ----------\n\n Returns\n -------\n gpd.GeoDataFrame\n Dataframe containing the geometry and information of\n ocean open boundary.\n '
self._init_dataframes()
return self._ocean |
def land(self):
'Retrieve the land boundary information dataframe\n\n Parameters\n ----------\n\n Returns\n -------\n gpd.GeoDataFrame\n Dataframe containing the geometry and information of\n land boundary.\n '
self._init_dataframes()
return self._land | 3,081,945,119,974,210,600 | Retrieve the land boundary information dataframe
Parameters
----------
Returns
-------
gpd.GeoDataFrame
Dataframe containing the geometry and information of
land boundary. | ocsmesh/mesh/mesh.py | land | noaa-ocs-modeling/OCSMesh | python | def land(self):
'Retrieve the land boundary information dataframe\n\n Parameters\n ----------\n\n Returns\n -------\n gpd.GeoDataFrame\n Dataframe containing the geometry and information of\n land boundary.\n '
self._init_dataframes()
return self._land |
def interior(self):
'Retrieve the island boundary information dataframe\n\n Parameters\n ----------\n\n Returns\n -------\n gpd.GeoDataFrame\n Dataframe containing the geometry and information of\n island boundary.\n '
self._init_dataframes()
return self._interior | 2,270,183,772,908,834,600 | Retrieve the island boundary information dataframe
Parameters
----------
Returns
-------
gpd.GeoDataFrame
Dataframe containing the geometry and information of
island boundary. | ocsmesh/mesh/mesh.py | interior | noaa-ocs-modeling/OCSMesh | python | def interior(self):
'Retrieve the island boundary information dataframe\n\n Parameters\n ----------\n\n Returns\n -------\n gpd.GeoDataFrame\n Dataframe containing the geometry and information of\n island boundary.\n '
self._init_dataframes()
return self._interior |
@property
def data(self) -> Dict[(Optional[int], Any)]:
'Read-only property referencing the boundary data dictionary'
return self._data | -235,149,111,437,052,400 | Read-only property referencing the boundary data dictionary | ocsmesh/mesh/mesh.py | data | noaa-ocs-modeling/OCSMesh | python | @property
def data(self) -> Dict[(Optional[int], Any)]:
return self._data |
@lru_cache(maxsize=1)
def __call__(self) -> gpd.GeoDataFrame:
"Retrieve the dataframe for all boundaries information\n\n Parameters\n ----------\n\n Returns\n -------\n gpd.GeoDataFrame\n Dataframe containing information for all boundaries shape\n and type.\n\n Notes\n -----\n The result of this method is cached, so that multiple calls\n to it won't result in multiple calculations. If the mesh\n is modified and the cache is not properly clear the calls\n to this method can result in invalid return values.\n "
self._init_dataframes()
data = []
for bnd in self.ocean().itertuples():
data.append({'id': bnd.id, 'ibtype': None, 'index_id': bnd.index_id, 'indexes': bnd.indexes, 'geometry': bnd.geometry})
for bnd in self.land().itertuples():
data.append({'id': bnd.id, 'ibtype': bnd.ibtype, 'index_id': bnd.index_id, 'indexes': bnd.indexes, 'geometry': bnd.geometry})
for bnd in self.interior().itertuples():
data.append({'id': bnd.id, 'ibtype': bnd.ibtype, 'index_id': bnd.index_id, 'indexes': bnd.indexes, 'geometry': bnd.geometry})
return gpd.GeoDataFrame(data, crs=self.mesh.crs) | 6,958,329,802,786,839,000 | Retrieve the dataframe for all boundaries information
Parameters
----------
Returns
-------
gpd.GeoDataFrame
Dataframe containing information for all boundaries shape
and type.
Notes
-----
The result of this method is cached, so that multiple calls
to it won't result in multiple calculations. If the mesh
is modified and the cache is not properly cleared, the calls
to this method can result in invalid return values. | ocsmesh/mesh/mesh.py | __call__ | noaa-ocs-modeling/OCSMesh | python | @lru_cache(maxsize=1)
def __call__(self) -> gpd.GeoDataFrame:
"Retrieve the dataframe for all boundaries information\n\n Parameters\n ----------\n\n Returns\n -------\n gpd.GeoDataFrame\n Dataframe containing information for all boundaries shape\n and type.\n\n Notes\n -----\n The result of this method is cached, so that multiple calls\n to it won't result in multiple calculations. If the mesh\n is modified and the cache is not properly clear the calls\n to this method can result in invalid return values.\n "
self._init_dataframes()
data = []
for bnd in self.ocean().itertuples():
data.append({'id': bnd.id, 'ibtype': None, 'index_id': bnd.index_id, 'indexes': bnd.indexes, 'geometry': bnd.geometry})
for bnd in self.land().itertuples():
data.append({'id': bnd.id, 'ibtype': bnd.ibtype, 'index_id': bnd.index_id, 'indexes': bnd.indexes, 'geometry': bnd.geometry})
for bnd in self.interior().itertuples():
data.append({'id': bnd.id, 'ibtype': bnd.ibtype, 'index_id': bnd.index_id, 'indexes': bnd.indexes, 'geometry': bnd.geometry})
return gpd.GeoDataFrame(data, crs=self.mesh.crs) |
def __len__(self) -> int:
'Returns the number of boundary segments'
return len(self()) | -2,137,916,475,226,730,500 | Returns the number of boundary segments | ocsmesh/mesh/mesh.py | __len__ | noaa-ocs-modeling/OCSMesh | python | def __len__(self) -> int:
return len(self()) |
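A hedged end-to-end sketch tying the boundary records together. It assumes the parent mesh exposes this helper as a `boundaries` attribute (not shown in these records) and re-uses the placeholder input file from the `open` sketch above.

from ocsmesh.mesh.mesh import Mesh   # import location assumed

mesh = Mesh.open('hgrid.gr3', crs='EPSG:4326')    # placeholder file
mesh.boundaries.auto_generate(threshold=0.0)      # classify ocean/land/island segments
print(len(mesh.boundaries))                       # number of boundary segments via __len__
print(mesh.boundaries.ocean().head())             # open-boundary dataframe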
def auto_generate(self, threshold: float=0.0, land_ibtype: int=0, interior_ibtype: int=1):
'Automatically detect boundaries based on elevation data.\n\n Parameters\n ----------\n threshold : float, default=0\n Threshold above which nodes are considered dry nodes\n for ocean vs land boundary detection\n land_ibtype : int, default=0\n Value to assign to land boundary type\n interior_ibtype : int, default=1\n Value to assign to island boundary type\n\n Returns\n -------\n None\n\n Raises\n ------\n ValueError\n If any of the values assigned to a mesh node is `np.nan`.\n\n Notes\n -----\n An edge is considered dry if any of the attached nodes are\n dry (its elevation is larger than or equal to the `threshold`).\n '
values = self.mesh.value
if np.any(np.isnan(values)):
raise ValueError('Mesh contains invalid values. Raster values must be interpolated to the mesh before generating boundaries.')
coords = self.mesh.msh_t.vert2['coord']
coo_to_idx = {tuple(coo): idx for (idx, coo) in enumerate(coords)}
polys = utils.get_mesh_polygons(self.mesh.msh_t)
boundaries = defaultdict(defaultdict)
bdry_type = dict
get_id = self.mesh.nodes.get_id_by_index
for poly in polys:
ext_ring_coo = poly.exterior.coords
ext_ring = np.array([(coo_to_idx[ext_ring_coo[e]], coo_to_idx[ext_ring_coo[(e + 1)]]) for (e, coo) in enumerate(ext_ring_coo[:(- 1)])])
edge_tag = np.full(ext_ring.shape, 0)
edge_tag[(np.where((values[ext_ring[:, 0]] < threshold))[0], 0)] = (- 1)
edge_tag[(np.where((values[ext_ring[:, 1]] < threshold))[0], 1)] = (- 1)
edge_tag[(np.where((values[ext_ring[:, 0]] >= threshold))[0], 0)] = 1
edge_tag[(np.where((values[ext_ring[:, 1]] >= threshold))[0], 1)] = 1
ocean_boundary = []
land_boundary = []
for (i, (e0, e1)) in enumerate(edge_tag):
if np.any((np.asarray((e0, e1)) == 1)):
land_boundary.append(tuple(ext_ring[i, :]))
elif np.any((np.asarray((e0, e1)) == (- 1))):
ocean_boundary.append(tuple(ext_ring[i, :]))
ocean_boundaries = []
if (len(ocean_boundary) != 0):
ocean_segs = linemerge(coords[np.array(ocean_boundary)].tolist())
ocean_segs = ([ocean_segs] if isinstance(ocean_segs, LineString) else ocean_segs)
ocean_boundaries = [[(coo_to_idx[seg.coords[e]], coo_to_idx[seg.coords[(e + 1)]]) for (e, coo) in enumerate(seg.coords[:(- 1)])] for seg in ocean_segs]
land_boundaries = []
if (len(land_boundary) != 0):
land_segs = linemerge(coords[np.array(land_boundary)].tolist())
land_segs = ([land_segs] if isinstance(land_segs, LineString) else land_segs)
land_boundaries = [[(coo_to_idx[seg.coords[e]], coo_to_idx[seg.coords[(e + 1)]]) for (e, coo) in enumerate(seg.coords[:(- 1)])] for seg in land_segs]
_bnd_id = len(boundaries[None])
for bnd in ocean_boundaries:
(e0, e1) = [list(t) for t in zip(*bnd)]
e0 = [get_id(vert) for vert in e0]
data = (e0 + [get_id(e1[(- 1)])])
boundaries[None][_bnd_id] = bdry_type(indexes=data, properties={})
_bnd_id += 1
_bnd_id = len(boundaries[land_ibtype])
for bnd in land_boundaries:
(e0, e1) = [list(t) for t in zip(*bnd)]
e0 = [get_id(vert) for vert in e0]
data = (e0 + [get_id(e1[(- 1)])])
boundaries[land_ibtype][_bnd_id] = bdry_type(indexes=data, properties={})
_bnd_id += 1
_bnd_id = 0
interior_boundaries = defaultdict()
for poly in polys:
interiors = poly.interiors
for interior in interiors:
int_ring_coo = interior.coords
int_ring = [(coo_to_idx[int_ring_coo[e]], coo_to_idx[int_ring_coo[(e + 1)]]) for (e, coo) in enumerate(int_ring_coo[:(- 1)])]
(e0, e1) = [list(t) for t in zip(*int_ring)]
if (utils.signed_polygon_area(self.mesh.coord[e0, :]) < 0):
e0 = e0[::(- 1)]
e1 = e1[::(- 1)]
e0 = [get_id(vert) for vert in e0]
e0.append(e0[0])
interior_boundaries[_bnd_id] = e0
_bnd_id += 1
for (bnd_id, data) in interior_boundaries.items():
boundaries[interior_ibtype][bnd_id] = bdry_type(indexes=data, properties={})
self._data = boundaries
self._init_dataframes.cache_clear()
self.__call__.cache_clear()
self._init_dataframes() | 3,523,008,198,753,785,300 | Automatically detect boundaries based on elevation data.
Parameters
----------
threshold : float, default=0
Threshold above which nodes are considered dry nodes
for ocean vs land boundary detection
land_ibtype : int, default=0
Value to assign to land boundary type
interior_ibtype : int, default=1
Value to assign to island boundary type
Returns
-------
None
Raises
------
ValueError
If any of the values assigned to a mesh node is `np.nan`.
Notes
-----
An edge is considered dry if any of the attached nodes are
dry (its elevation is larger than or equal to the `threshold`). | ocsmesh/mesh/mesh.py | auto_generate | noaa-ocs-modeling/OCSMesh | python | def auto_generate(self, threshold: float=0.0, land_ibtype: int=0, interior_ibtype: int=1):
'Automatically detect boundaries based on elevation data.\n\n Parameters\n ----------\n threshold : float, default=0\n Threshold above which nodes are considered dry nodes\n for ocean vs land boundary detection\n land_ibtype : int, default=0\n Value to assign to land boundary type\n interior_ibtype : int, default=1\n Value to assign to island boundary type\n\n Returns\n -------\n None\n\n Raises\n ------\n ValueError\n If any of the values assigned to a mesh node is `np.nan`.\n\n Notes\n -----\n An edge is considered dry if any of the attached nodes are\n dry (its elevation is larger than or equal to the `threshold`).\n '
values = self.mesh.value
if np.any(np.isnan(values)):
raise ValueError('Mesh contains invalid values. Raster values must be interpolated to the mesh before generating boundaries.')
coords = self.mesh.msh_t.vert2['coord']
coo_to_idx = {tuple(coo): idx for (idx, coo) in enumerate(coords)}
polys = utils.get_mesh_polygons(self.mesh.msh_t)
boundaries = defaultdict(defaultdict)
bdry_type = dict
get_id = self.mesh.nodes.get_id_by_index
for poly in polys:
ext_ring_coo = poly.exterior.coords
ext_ring = np.array([(coo_to_idx[ext_ring_coo[e]], coo_to_idx[ext_ring_coo[(e + 1)]]) for (e, coo) in enumerate(ext_ring_coo[:(- 1)])])
edge_tag = np.full(ext_ring.shape, 0)
edge_tag[(np.where((values[ext_ring[:, 0]] < threshold))[0], 0)] = (- 1)
edge_tag[(np.where((values[ext_ring[:, 1]] < threshold))[0], 1)] = (- 1)
edge_tag[(np.where((values[ext_ring[:, 0]] >= threshold))[0], 0)] = 1
edge_tag[(np.where((values[ext_ring[:, 1]] >= threshold))[0], 1)] = 1
ocean_boundary = []
land_boundary = []
for (i, (e0, e1)) in enumerate(edge_tag):
if np.any((np.asarray((e0, e1)) == 1)):
land_boundary.append(tuple(ext_ring[i, :]))
elif np.any((np.asarray((e0, e1)) == (- 1))):
ocean_boundary.append(tuple(ext_ring[i, :]))
ocean_boundaries = []
if (len(ocean_boundary) != 0):
ocean_segs = linemerge(coords[np.array(ocean_boundary)].tolist())
ocean_segs = ([ocean_segs] if isinstance(ocean_segs, LineString) else ocean_segs)
ocean_boundaries = [[(coo_to_idx[seg.coords[e]], coo_to_idx[seg.coords[(e + 1)]]) for (e, coo) in enumerate(seg.coords[:(- 1)])] for seg in ocean_segs]
land_boundaries = []
if (len(land_boundary) != 0):
land_segs = linemerge(coords[np.array(land_boundary)].tolist())
land_segs = ([land_segs] if isinstance(land_segs, LineString) else land_segs)
land_boundaries = [[(coo_to_idx[seg.coords[e]], coo_to_idx[seg.coords[(e + 1)]]) for (e, coo) in enumerate(seg.coords[:(- 1)])] for seg in land_segs]
_bnd_id = len(boundaries[None])
for bnd in ocean_boundaries:
(e0, e1) = [list(t) for t in zip(*bnd)]
e0 = [get_id(vert) for vert in e0]
data = (e0 + [get_id(e1[(- 1)])])
boundaries[None][_bnd_id] = bdry_type(indexes=data, properties={})
_bnd_id += 1
_bnd_id = len(boundaries[land_ibtype])
for bnd in land_boundaries:
(e0, e1) = [list(t) for t in zip(*bnd)]
e0 = [get_id(vert) for vert in e0]
data = (e0 + [get_id(e1[(- 1)])])
boundaries[land_ibtype][_bnd_id] = bdry_type(indexes=data, properties={})
_bnd_id += 1
_bnd_id = 0
interior_boundaries = defaultdict()
for poly in polys:
interiors = poly.interiors
for interior in interiors:
int_ring_coo = interior.coords
int_ring = [(coo_to_idx[int_ring_coo[e]], coo_to_idx[int_ring_coo[(e + 1)]]) for (e, coo) in enumerate(int_ring_coo[:(- 1)])]
(e0, e1) = [list(t) for t in zip(*int_ring)]
if (utils.signed_polygon_area(self.mesh.coord[e0, :]) < 0):
e0 = e0[::(- 1)]
e1 = e1[::(- 1)]
e0 = [get_id(vert) for vert in e0]
e0.append(e0[0])
interior_boundaries[_bnd_id] = e0
_bnd_id += 1
for (bnd_id, data) in interior_boundaries.items():
boundaries[interior_ibtype][bnd_id] = bdry_type(indexes=data, properties={})
self._data = boundaries
self._init_dataframes.cache_clear()
self.__call__.cache_clear()
self._init_dataframes() |
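
The `auto_generate` entry above tags each exterior-ring edge as land or ocean from the elevations of its two endpoints: an edge counts as dry (land) if either endpoint sits at or above the threshold. A minimal standalone sketch of that rule, with invented node values and ring indices (illustration only, not OCSMesh code):

```python
# Illustrative sketch, not OCSMesh code: classify ring edges as land or ocean
# from node elevations, mirroring the rule in the auto_generate docstring.
import numpy as np

values = np.array([-3.0, -1.0, 0.5, 2.0, -0.2])           # hypothetical node elevations
ring = [(i, (i + 1) % len(values)) for i in range(len(values))]
threshold = 0.0

land, ocean = [], []
for e0, e1 in ring:
    if values[e0] >= threshold or values[e1] >= threshold:
        land.append((e0, e1))    # at least one dry node -> land edge
    else:
        ocean.append((e0, e1))   # both nodes below threshold -> ocean edge

print("land edges:", land)       # edges touching nodes 2 or 3
print("ocean edges:", ocean)
```
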
def test_BasicDatasetProfiler_null_column():
'\n The profiler should determine that null columns are of null cardinality and of null type and\n not to generate expectations specific to types and cardinality categories.\n\n We verify this by running the basic profiler on a Pandas dataset with an empty column\n and asserting the number of successful results for the empty columns.\n '
toy_dataset = PandasDataset({'x': [1, 2, 3], 'y': [None, None, None]}, data_asset_name='toy_dataset')
assert (len(toy_dataset.get_expectation_suite(suppress_warnings=True)['expectations']) == 0)
(expectations_config, evr_config) = BasicDatasetProfiler.profile(toy_dataset)
assert (len([result for result in evr_config['results'] if ((result['expectation_config']['kwargs'].get('column') == 'y') and result['success'])]) == 4)
assert (len([result for result in evr_config['results'] if ((result['expectation_config']['kwargs'].get('column') == 'y') and result['success'])]) < len([result for result in evr_config['results'] if ((result['expectation_config']['kwargs'].get('column') == 'x') and result['success'])])) | -1,926,499,559,757,927,400 | The profiler should determine that null columns are of null cardinality and of null type and
not to generate expectations specific to types and cardinality categories.
We verify this by running the basic profiler on a Pandas dataset with an empty column
and asserting the number of successful results for the empty columns. | tests/profile/test_profile.py | test_BasicDatasetProfiler_null_column | AdamHepner/great_expectations | python | def test_BasicDatasetProfiler_null_column():
'\n The profiler should determine that null columns are of null cardinality and of null type and\n not to generate expectations specific to types and cardinality categories.\n\n We verify this by running the basic profiler on a Pandas dataset with an empty column\n and asserting the number of successful results for the empty columns.\n '
toy_dataset = PandasDataset({'x': [1, 2, 3], 'y': [None, None, None]}, data_asset_name='toy_dataset')
assert (len(toy_dataset.get_expectation_suite(suppress_warnings=True)['expectations']) == 0)
(expectations_config, evr_config) = BasicDatasetProfiler.profile(toy_dataset)
assert (len([result for result in evr_config['results'] if ((result['expectation_config']['kwargs'].get('column') == 'y') and result['success'])]) == 4)
assert (len([result for result in evr_config['results'] if ((result['expectation_config']['kwargs'].get('column') == 'y') and result['success'])]) < len([result for result in evr_config['results'] if ((result['expectation_config']['kwargs'].get('column') == 'x') and result['success'])])) |
def test_BasicDatasetProfiler_partially_null_column(dataset):
'\n Unit test to check the expectations that BasicDatasetProfiler creates for a partially null column.\n The test is executed against all the backends (Pandas, Spark, etc.), because it uses\n the fixture.\n\n "nulls" is the partially null column in the fixture dataset\n '
(expectations_config, evr_config) = BasicDatasetProfiler.profile(dataset)
assert (set(['expect_column_to_exist', 'expect_column_values_to_be_in_type_list', 'expect_column_unique_value_count_to_be_between', 'expect_column_proportion_of_unique_values_to_be_between', 'expect_column_values_to_not_be_null', 'expect_column_values_to_be_in_set', 'expect_column_values_to_be_unique']) == set([expectation['expectation_type'] for expectation in expectations_config['expectations'] if (expectation['kwargs'].get('column') == 'nulls')])) | 1,171,748,317,385,277,400 | Unit test to check the expectations that BasicDatasetProfiler creates for a partially null column.
The test is executed against all the backends (Pandas, Spark, etc.), because it uses
the fixture.
"nulls" is the partially null column in the fixture dataset | tests/profile/test_profile.py | test_BasicDatasetProfiler_partially_null_column | AdamHepner/great_expectations | python | def test_BasicDatasetProfiler_partially_null_column(dataset):
'\n Unit test to check the expectations that BasicDatasetProfiler creates for a partially null column.\n The test is executed against all the backends (Pandas, Spark, etc.), because it uses\n the fixture.\n\n "nulls" is the partially null column in the fixture dataset\n '
(expectations_config, evr_config) = BasicDatasetProfiler.profile(dataset)
assert (set(['expect_column_to_exist', 'expect_column_values_to_be_in_type_list', 'expect_column_unique_value_count_to_be_between', 'expect_column_proportion_of_unique_values_to_be_between', 'expect_column_values_to_not_be_null', 'expect_column_values_to_be_in_set', 'expect_column_values_to_be_unique']) == set([expectation['expectation_type'] for expectation in expectations_config['expectations'] if (expectation['kwargs'].get('column') == 'nulls')])) |
def test_BasicDatasetProfiler_non_numeric_low_cardinality(non_numeric_low_card_dataset):
'\n Unit test to check the expectations that BasicDatasetProfiler creates for a low cardinality\n non numeric column.\n The test is executed against all the backends (Pandas, Spark, etc.), because it uses\n the fixture.\n '
(expectations_config, evr_config) = BasicDatasetProfiler.profile(non_numeric_low_card_dataset)
assert (set(['expect_column_to_exist', 'expect_column_values_to_be_in_type_list', 'expect_column_unique_value_count_to_be_between', 'expect_column_proportion_of_unique_values_to_be_between', 'expect_column_values_to_not_be_null', 'expect_column_values_to_be_in_set', 'expect_column_values_to_not_match_regex']) == set([expectation['expectation_type'] for expectation in expectations_config['expectations'] if (expectation['kwargs'].get('column') == 'lowcardnonnum')])) | 8,949,136,718,048,000,000 | Unit test to check the expectations that BasicDatasetProfiler creates for a low cardinality
non numeric column.
The test is executed against all the backends (Pandas, Spark, etc.), because it uses
the fixture. | tests/profile/test_profile.py | test_BasicDatasetProfiler_non_numeric_low_cardinality | AdamHepner/great_expectations | python | def test_BasicDatasetProfiler_non_numeric_low_cardinality(non_numeric_low_card_dataset):
'\n Unit test to check the expectations that BasicDatasetProfiler creates for a low cardinality\n non numeric column.\n The test is executed against all the backends (Pandas, Spark, etc.), because it uses\n the fixture.\n '
(expectations_config, evr_config) = BasicDatasetProfiler.profile(non_numeric_low_card_dataset)
assert (set(['expect_column_to_exist', 'expect_column_values_to_be_in_type_list', 'expect_column_unique_value_count_to_be_between', 'expect_column_proportion_of_unique_values_to_be_between', 'expect_column_values_to_not_be_null', 'expect_column_values_to_be_in_set', 'expect_column_values_to_not_match_regex']) == set([expectation['expectation_type'] for expectation in expectations_config['expectations'] if (expectation['kwargs'].get('column') == 'lowcardnonnum')])) |
def test_BasicDatasetProfiler_non_numeric_high_cardinality(non_numeric_high_card_dataset):
'\n Unit test to check the expectations that BasicDatasetProfiler creates for a high cardinality\n non numeric column.\n The test is executed against all the backends (Pandas, Spark, etc.), because it uses\n the fixture.\n '
(expectations_config, evr_config) = BasicDatasetProfiler.profile(non_numeric_high_card_dataset)
assert (set(['expect_column_to_exist', 'expect_column_values_to_be_in_type_list', 'expect_column_unique_value_count_to_be_between', 'expect_column_proportion_of_unique_values_to_be_between', 'expect_column_values_to_not_be_null', 'expect_column_values_to_be_in_set', 'expect_column_values_to_not_match_regex']) == set([expectation['expectation_type'] for expectation in expectations_config['expectations'] if (expectation['kwargs'].get('column') == 'highcardnonnum')])) | -8,345,396,114,734,184,000 | Unit test to check the expectations that BasicDatasetProfiler creates for a high cardinality
non numeric column.
The test is executed against all the backends (Pandas, Spark, etc.), because it uses
the fixture. | tests/profile/test_profile.py | test_BasicDatasetProfiler_non_numeric_high_cardinality | AdamHepner/great_expectations | python | def test_BasicDatasetProfiler_non_numeric_high_cardinality(non_numeric_high_card_dataset):
'\n Unit test to check the expectations that BasicDatasetProfiler creates for a high cardinality\n non numeric column.\n The test is executed against all the backends (Pandas, Spark, etc.), because it uses\n the fixture.\n '
(expectations_config, evr_config) = BasicDatasetProfiler.profile(non_numeric_high_card_dataset)
assert (set(['expect_column_to_exist', 'expect_column_values_to_be_in_type_list', 'expect_column_unique_value_count_to_be_between', 'expect_column_proportion_of_unique_values_to_be_between', 'expect_column_values_to_not_be_null', 'expect_column_values_to_be_in_set', 'expect_column_values_to_not_match_regex']) == set([expectation['expectation_type'] for expectation in expectations_config['expectations'] if (expectation['kwargs'].get('column') == 'highcardnonnum')])) |
def test_BasicDatasetProfiler_numeric_high_cardinality(numeric_high_card_dataset):
'\n Unit test to check the expectations that BasicDatasetProfiler creates for a high cardinality\n numeric column.\n The test is executed against all the backends (Pandas, Spark, etc.), because it uses\n the fixture.\n '
(expectations_config, evr_config) = BasicDatasetProfiler.profile(numeric_high_card_dataset)
assert (set(['expect_column_to_exist', 'expect_table_row_count_to_be_between', 'expect_table_columns_to_match_ordered_list', 'expect_column_values_to_be_in_type_list', 'expect_column_unique_value_count_to_be_between', 'expect_column_proportion_of_unique_values_to_be_between', 'expect_column_values_to_not_be_null', 'expect_column_values_to_be_in_set', 'expect_column_values_to_be_unique']) == set([expectation['expectation_type'] for expectation in expectations_config['expectations']])) | 4,874,539,107,115,850,000 | Unit test to check the expectations that BasicDatasetProfiler creates for a high cardinality
numeric column.
The test is executed against all the backends (Pandas, Spark, etc.), because it uses
the fixture. | tests/profile/test_profile.py | test_BasicDatasetProfiler_numeric_high_cardinality | AdamHepner/great_expectations | python | def test_BasicDatasetProfiler_numeric_high_cardinality(numeric_high_card_dataset):
'\n Unit test to check the expectations that BasicDatasetProfiler creates for a high cardinality\n numeric column.\n The test is executed against all the backends (Pandas, Spark, etc.), because it uses\n the fixture.\n '
(expectations_config, evr_config) = BasicDatasetProfiler.profile(numeric_high_card_dataset)
assert (set(['expect_column_to_exist', 'expect_table_row_count_to_be_between', 'expect_table_columns_to_match_ordered_list', 'expect_column_values_to_be_in_type_list', 'expect_column_unique_value_count_to_be_between', 'expect_column_proportion_of_unique_values_to_be_between', 'expect_column_values_to_not_be_null', 'expect_column_values_to_be_in_set', 'expect_column_values_to_be_unique']) == set([expectation['expectation_type'] for expectation in expectations_config['expectations']])) |
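
The four profiler tests above all reduce a suite to the set of expectation types generated for one column and compare it against an expected set. A small helper sketch of that pattern, assuming the suite is a plain dict shaped like the profiler output these tests inspect (an `expectations` list whose items carry `expectation_type` and `kwargs`):

```python
# Sketch of the set-comparison pattern used in the tests above; `suite` is
# assumed to be a dict shaped like the profiler output these tests inspect.
def expectation_types_for_column(suite, column):
    return {
        exp["expectation_type"]
        for exp in suite["expectations"]
        if exp["kwargs"].get("column") == column
    }

# Hypothetical usage:
# assert "expect_column_values_to_not_be_null" in expectation_types_for_column(
#     expectations_config, "nulls"
# )
```
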
def test_context_profiler(empty_data_context, filesystem_csv_2):
"This just validates that it's possible to profile using the datasource hook, and have\n validation results available in the DataContext"
empty_data_context.add_datasource('my_datasource', module_name='great_expectations.datasource', class_name='PandasDatasource', base_directory=str(filesystem_csv_2))
not_so_empty_data_context = empty_data_context
assert (not_so_empty_data_context.list_expectation_suite_keys() == [])
not_so_empty_data_context.profile_datasource('my_datasource', profiler=BasicDatasetProfiler)
assert (len(not_so_empty_data_context.list_expectation_suite_keys()) == 1)
profiled_expectations = not_so_empty_data_context.get_expectation_suite('f1', 'BasicDatasetProfiler')
print(json.dumps(profiled_expectations, indent=2))
for exp in profiled_expectations['expectations']:
assert ('BasicDatasetProfiler' in exp['meta'])
assert ('confidence' in exp['meta']['BasicDatasetProfiler'])
assert (profiled_expectations['data_asset_name'] == 'my_datasource/default/f1')
assert (profiled_expectations['expectation_suite_name'] == 'BasicDatasetProfiler')
assert ('batch_kwargs' in profiled_expectations['meta']['BasicDatasetProfiler'])
assert (len(profiled_expectations['expectations']) > 0) | -18,379,510,840,733,516 | This just validates that it's possible to profile using the datasource hook, and have
validation results available in the DataContext | tests/profile/test_profile.py | test_context_profiler | AdamHepner/great_expectations | python | def test_context_profiler(empty_data_context, filesystem_csv_2):
"This just validates that it's possible to profile using the datasource hook, and have\n validation results available in the DataContext"
empty_data_context.add_datasource('my_datasource', module_name='great_expectations.datasource', class_name='PandasDatasource', base_directory=str(filesystem_csv_2))
not_so_empty_data_context = empty_data_context
assert (not_so_empty_data_context.list_expectation_suite_keys() == [])
not_so_empty_data_context.profile_datasource('my_datasource', profiler=BasicDatasetProfiler)
assert (len(not_so_empty_data_context.list_expectation_suite_keys()) == 1)
profiled_expectations = not_so_empty_data_context.get_expectation_suite('f1', 'BasicDatasetProfiler')
print(json.dumps(profiled_expectations, indent=2))
for exp in profiled_expectations['expectations']:
assert ('BasicDatasetProfiler' in exp['meta'])
assert ('confidence' in exp['meta']['BasicDatasetProfiler'])
assert (profiled_expectations['data_asset_name'] == 'my_datasource/default/f1')
assert (profiled_expectations['expectation_suite_name'] == 'BasicDatasetProfiler')
assert ('batch_kwargs' in profiled_expectations['meta']['BasicDatasetProfiler'])
assert (len(profiled_expectations['expectations']) > 0) |
def test_BasicDatasetProfiler_on_titanic():
'\n A snapshot test for BasicDatasetProfiler.\n We are running the profiler on the Titanic dataset\n and comparing the EVRs to ones retrieved from a\n previously stored file.\n '
df = ge.read_csv('./tests/test_sets/Titanic.csv')
(suite, evrs) = df.profile(BasicDatasetProfiler)
print(json.dumps(suite['meta'], indent=2))
assert ('columns' in suite['meta'])
for (k, v) in suite['meta']['columns'].items():
assert (v == {'description': ''})
evrs = df.validate(result_format='SUMMARY')
with open('tests/test_sets/expected_evrs_BasicDatasetProfiler_on_titanic.json', 'r') as file:
expected_evrs = json.load(file, object_pairs_hook=OrderedDict)
expected_evrs.pop('meta')
evrs.pop('meta')
for result in evrs['results']:
if ('partial_unexpected_counts' in result['result']):
result['result'].pop('partial_unexpected_counts')
for result in expected_evrs['results']:
if ('partial_unexpected_counts' in result['result']):
result['result'].pop('partial_unexpected_counts')
if (not PY2):
assertDeepAlmostEqual(expected_evrs, evrs) | 8,954,095,020,677,000,000 | A snapshot test for BasicDatasetProfiler.
We are running the profiler on the Titanic dataset
and comparing the EVRs to ones retrieved from a
previously stored file. | tests/profile/test_profile.py | test_BasicDatasetProfiler_on_titanic | AdamHepner/great_expectations | python | def test_BasicDatasetProfiler_on_titanic():
'\n A snapshot test for BasicDatasetProfiler.\n We are running the profiler on the Titanic dataset\n and comparing the EVRs to ones retrieved from a\n previously stored file.\n '
df = ge.read_csv('./tests/test_sets/Titanic.csv')
(suite, evrs) = df.profile(BasicDatasetProfiler)
print(json.dumps(suite['meta'], indent=2))
assert ('columns' in suite['meta'])
for (k, v) in suite['meta']['columns'].items():
            assert (v == {'description': ''})
evrs = df.validate(result_format='SUMMARY')
with open('tests/test_sets/expected_evrs_BasicDatasetProfiler_on_titanic.json', 'r') as file:
expected_evrs = json.load(file, object_pairs_hook=OrderedDict)
expected_evrs.pop('meta')
evrs.pop('meta')
for result in evrs['results']:
if ('partial_unexpected_counts' in result['result']):
result['result'].pop('partial_unexpected_counts')
for result in expected_evrs['results']:
if ('partial_unexpected_counts' in result['result']):
result['result'].pop('partial_unexpected_counts')
if (not PY2):
assertDeepAlmostEqual(expected_evrs, evrs) |
def concat_data(labelsfile, notes_file):
'\n INPUTS:\n labelsfile: sorted by hadm id, contains one label per line\n notes_file: sorted by hadm id, contains one note per line\n '
with open(labelsfile, 'r') as lf:
print('CONCATENATING')
with open(notes_file, 'r') as notesfile:
outfilename = ('%s/notes_labeled.csv' % MIMIC_3_DIR)
with open(outfilename, 'w') as outfile:
w = csv.writer(outfile)
w.writerow(['SUBJECT_ID', 'HADM_ID', 'TEXT', 'LABELS'])
labels_gen = next_labels(lf)
notes_gen = next_notes(notesfile)
for (i, (subj_id, text, hadm_id)) in enumerate(notes_gen):
if ((i % 10000) == 0):
print((str(i) + ' done'))
(cur_subj, cur_labels, cur_hadm) = next(labels_gen)
if (cur_hadm == hadm_id):
w.writerow([subj_id, str(hadm_id), text, ';'.join(cur_labels)])
else:
print("couldn't find matching hadm_id. data is probably not sorted correctly")
break
return outfilename | -3,154,365,080,102,165,500 | INPUTS:
labelsfile: sorted by hadm id, contains one label per line
notes_file: sorted by hadm id, contains one note per line | dataproc/concat_and_split.py | concat_data | franzbischoff/caml-mimic | python | def concat_data(labelsfile, notes_file):
'\n INPUTS:\n labelsfile: sorted by hadm id, contains one label per line\n notes_file: sorted by hadm id, contains one note per line\n '
with open(labelsfile, 'r') as lf:
print('CONCATENATING')
with open(notes_file, 'r') as notesfile:
outfilename = ('%s/notes_labeled.csv' % MIMIC_3_DIR)
with open(outfilename, 'w') as outfile:
w = csv.writer(outfile)
w.writerow(['SUBJECT_ID', 'HADM_ID', 'TEXT', 'LABELS'])
labels_gen = next_labels(lf)
notes_gen = next_notes(notesfile)
for (i, (subj_id, text, hadm_id)) in enumerate(notes_gen):
if ((i % 10000) == 0):
print((str(i) + ' done'))
(cur_subj, cur_labels, cur_hadm) = next(labels_gen)
if (cur_hadm == hadm_id):
w.writerow([subj_id, str(hadm_id), text, ';'.join(cur_labels)])
else:
print("couldn't find matching hadm_id. data is probably not sorted correctly")
break
return outfilename |
def next_labels(labelsfile):
'\n Generator for label sets from the label file\n '
labels_reader = csv.reader(labelsfile)
next(labels_reader)
first_label_line = next(labels_reader)
cur_subj = int(first_label_line[0])
cur_hadm = int(first_label_line[1])
cur_labels = [first_label_line[2]]
for row in labels_reader:
subj_id = int(row[0])
hadm_id = int(row[1])
code = row[2]
if ((hadm_id != cur_hadm) or (subj_id != cur_subj)):
(yield (cur_subj, cur_labels, cur_hadm))
cur_labels = [code]
cur_subj = subj_id
cur_hadm = hadm_id
else:
cur_labels.append(code)
(yield (cur_subj, cur_labels, cur_hadm)) | 7,989,241,263,583,836,000 | Generator for label sets from the label file | dataproc/concat_and_split.py | next_labels | franzbischoff/caml-mimic | python | def next_labels(labelsfile):
'\n \n '
labels_reader = csv.reader(labelsfile)
next(labels_reader)
first_label_line = next(labels_reader)
cur_subj = int(first_label_line[0])
cur_hadm = int(first_label_line[1])
cur_labels = [first_label_line[2]]
for row in labels_reader:
subj_id = int(row[0])
hadm_id = int(row[1])
code = row[2]
if ((hadm_id != cur_hadm) or (subj_id != cur_subj)):
(yield (cur_subj, cur_labels, cur_hadm))
cur_labels = [code]
cur_subj = subj_id
cur_hadm = hadm_id
else:
cur_labels.append(code)
(yield (cur_subj, cur_labels, cur_hadm)) |
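
`next_labels` walks a labels file that is sorted by admission id and yields one (subject, codes, admission) tuple per admission. A hypothetical in-memory run shows the grouping; the CSV rows are fabricated, and the import path is taken from this row's `path` column, so it is an assumption rather than something shown in the code itself:

```python
# Hypothetical usage of next_labels() with an in-memory CSV; rows are made up
# and the import path is assumed from this row's path column.
import io
from dataproc.concat_and_split import next_labels  # assumed import path

labels_csv = io.StringIO(
    "SUBJECT_ID,HADM_ID,ICD9_CODE\n"
    "3,100,401.9\n"
    "3,100,250.00\n"
    "4,200,V30.00\n"
)
for subj_id, codes, hadm_id in next_labels(labels_csv):
    print(subj_id, hadm_id, codes)
# Expected, given the file is sorted by hadm_id:
# 3 100 ['401.9', '250.00']
# 4 200 ['V30.00']
```
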
def next_notes(notesfile):
'\n Generator for notes from the notes file\n This will also concatenate discharge summaries and their addenda, which have the same subject and hadm id\n '
nr = csv.reader(notesfile)
next(nr)
first_note = next(nr)
cur_subj = int(first_note[0])
cur_hadm = int(first_note[1])
cur_text = first_note[3]
for row in nr:
subj_id = int(row[0])
hadm_id = int(row[1])
text = row[3]
if ((hadm_id != cur_hadm) or (subj_id != cur_subj)):
(yield (cur_subj, cur_text, cur_hadm))
cur_text = text
cur_subj = subj_id
cur_hadm = hadm_id
else:
cur_text += (' ' + text)
(yield (cur_subj, cur_text, cur_hadm)) | 8,890,814,948,314,462,000 | Generator for notes from the notes file
This will also concatenate discharge summaries and their addenda, which have the same subject and hadm id | dataproc/concat_and_split.py | next_notes | franzbischoff/caml-mimic | python | def next_notes(notesfile):
'\n Generator for notes from the notes file\n This will also concatenate discharge summaries and their addenda, which have the same subject and hadm id\n '
nr = csv.reader(notesfile)
next(nr)
first_note = next(nr)
cur_subj = int(first_note[0])
cur_hadm = int(first_note[1])
cur_text = first_note[3]
for row in nr:
subj_id = int(row[0])
hadm_id = int(row[1])
text = row[3]
if ((hadm_id != cur_hadm) or (subj_id != cur_subj)):
(yield (cur_subj, cur_text, cur_hadm))
cur_text = text
cur_subj = subj_id
cur_hadm = hadm_id
else:
cur_text += (' ' + text)
(yield (cur_subj, cur_text, cur_hadm)) |
def _prediction_loop(self, dataloader: DataLoader, description: str, task_name: str, mode: str, prediction_loss_only: Optional[bool]=None) -> PredictionOutput:
'\n Prediction/evaluation loop, shared by `evaluate()` and `predict()`.\n Works both with or without labels.\n '
prediction_loss_only = (prediction_loss_only if (prediction_loss_only is not None) else self.prediction_loss_only)
model = self.model
if (self.args.n_gpu > 1):
model = torch.nn.DataParallel(model)
else:
model = self.model
batch_size = dataloader.batch_size
logger.info('***** Running %s *****', description)
logger.info(' Num examples = %d', self.num_examples(dataloader))
logger.info(' Batch size = %d', batch_size)
eval_losses: List[float] = []
preds: torch.Tensor = None
label_ids: torch.Tensor = None
model.eval()
if is_tpu_available():
dataloader = pl.ParallelLoader(dataloader, [self.args.device]).per_device_loader(self.args.device)
for inputs in tqdm(dataloader, desc=description):
has_labels = any(((inputs.get(k) is not None) for k in ['labels', 'lm_labels', 'masked_lm_labels']))
for (k, v) in inputs.items():
inputs[k] = v.to(self.args.device)
with torch.no_grad():
outputs = model(**inputs)
if has_labels:
(step_eval_loss, logits) = outputs[:2]
eval_losses += [step_eval_loss.mean().item()]
else:
logits = outputs[0]
if (not prediction_loss_only):
if (preds is None):
preds = logits.detach()
else:
preds = torch.cat((preds, logits.detach()), dim=0)
if (inputs.get('labels') is not None):
if (label_ids is None):
label_ids = inputs['labels'].detach()
else:
label_ids = torch.cat((label_ids, inputs['labels'].detach()), dim=0)
if (self.args.local_rank != (- 1)):
if (preds is not None):
preds = self.distributed_concat(preds, num_total_examples=self.num_examples(dataloader))
if (label_ids is not None):
label_ids = self.distributed_concat(label_ids, num_total_examples=self.num_examples(dataloader))
elif is_tpu_available():
if (preds is not None):
preds = xm.mesh_reduce('eval_preds', preds, torch.cat)
if (label_ids is not None):
label_ids = xm.mesh_reduce('eval_label_ids', label_ids, torch.cat)
if (preds is not None):
preds = preds.cpu().numpy()
if (label_ids is not None):
label_ids = label_ids.cpu().numpy()
if ((self.compute_metrics is not None) and (preds is not None) and (label_ids is not None)):
metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
else:
metrics = {}
if (len(eval_losses) > 0):
metrics[f'{task_name}_{mode}_loss'] = np.mean(eval_losses)
for key in list(metrics.keys()):
if (not key.startswith(f'{task_name}_{mode}_')):
metrics[f'{task_name}_{mode}_{key}'] = metrics.pop(key)
return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics) | 4,250,822,879,790,479,400 | Prediction/evaluation loop, shared by `evaluate()` and `predict()`.
Works both with or without labels. | src/mtl_trainer.py | _prediction_loop | Daupler/CA-MTL | python | def _prediction_loop(self, dataloader: DataLoader, description: str, task_name: str, mode: str, prediction_loss_only: Optional[bool]=None) -> PredictionOutput:
'\n Prediction/evaluation loop, shared by `evaluate()` and `predict()`.\n Works both with or without labels.\n '
prediction_loss_only = (prediction_loss_only if (prediction_loss_only is not None) else self.prediction_loss_only)
model = self.model
if (self.args.n_gpu > 1):
model = torch.nn.DataParallel(model)
else:
model = self.model
batch_size = dataloader.batch_size
logger.info('***** Running %s *****', description)
logger.info(' Num examples = %d', self.num_examples(dataloader))
logger.info(' Batch size = %d', batch_size)
eval_losses: List[float] = []
preds: torch.Tensor = None
label_ids: torch.Tensor = None
model.eval()
if is_tpu_available():
dataloader = pl.ParallelLoader(dataloader, [self.args.device]).per_device_loader(self.args.device)
for inputs in tqdm(dataloader, desc=description):
has_labels = any(((inputs.get(k) is not None) for k in ['labels', 'lm_labels', 'masked_lm_labels']))
for (k, v) in inputs.items():
inputs[k] = v.to(self.args.device)
with torch.no_grad():
outputs = model(**inputs)
if has_labels:
(step_eval_loss, logits) = outputs[:2]
eval_losses += [step_eval_loss.mean().item()]
else:
logits = outputs[0]
if (not prediction_loss_only):
if (preds is None):
preds = logits.detach()
else:
preds = torch.cat((preds, logits.detach()), dim=0)
if (inputs.get('labels') is not None):
if (label_ids is None):
label_ids = inputs['labels'].detach()
else:
label_ids = torch.cat((label_ids, inputs['labels'].detach()), dim=0)
if (self.args.local_rank != (- 1)):
if (preds is not None):
preds = self.distributed_concat(preds, num_total_examples=self.num_examples(dataloader))
if (label_ids is not None):
label_ids = self.distributed_concat(label_ids, num_total_examples=self.num_examples(dataloader))
elif is_tpu_available():
if (preds is not None):
preds = xm.mesh_reduce('eval_preds', preds, torch.cat)
if (label_ids is not None):
label_ids = xm.mesh_reduce('eval_label_ids', label_ids, torch.cat)
if (preds is not None):
preds = preds.cpu().numpy()
if (label_ids is not None):
label_ids = label_ids.cpu().numpy()
if ((self.compute_metrics is not None) and (preds is not None) and (label_ids is not None)):
metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
else:
metrics = {}
if (len(eval_losses) > 0):
metrics[f'{task_name}_{mode}_loss'] = np.mean(eval_losses)
for key in list(metrics.keys()):
if (not key.startswith(f'{task_name}_{mode}_')):
metrics[f'{task_name}_{mode}_{key}'] = metrics.pop(key)
return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics) |
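
`_prediction_loop` stacks the per-batch logits and labels and finally hands them to `compute_metrics` as an `EvalPrediction`. The sketch below shows a metrics callable that fits that contract; `EvalPredictionStub` only stands in for the real `EvalPrediction` so the snippet runs on its own, and the accuracy metric is an invented example:

```python
# Sketch of a compute_metrics callable compatible with the loop above.
# EvalPredictionStub stands in for EvalPrediction so this runs standalone.
from collections import namedtuple
import numpy as np

EvalPredictionStub = namedtuple("EvalPredictionStub", ["predictions", "label_ids"])

def compute_accuracy(p):
    preds = np.argmax(p.predictions, axis=1)
    return {"acc": float((preds == p.label_ids).mean())}

p = EvalPredictionStub(
    predictions=np.array([[0.1, 0.9], [0.8, 0.2]]),
    label_ids=np.array([1, 1]),
)
print(compute_accuracy(p))  # {'acc': 0.5}
```
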
def get_health(self, **kwargs):
'Get the health of an instance anytime during execution. Allow us to check if the instance is still healthy. # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.get_health(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str zap_trace_span: OpenTracing span context\n :return: HealthCheck\n If the method is called asynchronously,\n returns the request thread.\n '
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_health_with_http_info(**kwargs)
else:
data = self.get_health_with_http_info(**kwargs)
return data | 4,016,601,636,423,578,000 | Get the health of an instance anytime during execution. Allow us to check if the instance is still healthy. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_health(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str zap_trace_span: OpenTracing span context
:return: HealthCheck
If the method is called asynchronously,
returns the request thread. | influxdb_client/service/health_service.py | get_health | rhajek/influxdb-client-python | python | def get_health(self, **kwargs):
'Get the health of an instance anytime during execution. Allow us to check if the instance is still healthy. # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.get_health(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str zap_trace_span: OpenTracing span context\n :return: HealthCheck\n If the method is called asynchronously,\n returns the request thread.\n '
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_health_with_http_info(**kwargs)
else:
data = self.get_health_with_http_info(**kwargs)
return data |
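
For context, a hedged usage sketch contrasting the two call styles the docstring describes. The `ApiClient`/`Configuration` import paths follow the usual swagger-generated layout of this client and may differ between versions; the host value is invented:

```python
# Hedged usage sketch; import paths and host are assumptions and may differ
# between versions of this client.
from influxdb_client.api_client import ApiClient
from influxdb_client.configuration import Configuration
from influxdb_client.service.health_service import HealthService

conf = Configuration()
conf.host = "http://localhost:8086"             # assumed local instance
service = HealthService(ApiClient(conf))

health = service.get_health()                   # synchronous: returns HealthCheck
thread = service.get_health(async_req=True)     # asynchronous: returns a thread
print(health.status, thread.get().status)
```
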
def get_health_with_http_info(self, **kwargs):
'Get the health of an instance anytime during execution. Allow us to check if the instance is still healthy. # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.get_health_with_http_info(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str zap_trace_span: OpenTracing span context\n :return: HealthCheck\n If the method is called asynchronously,\n returns the request thread.\n '
local_var_params = locals()
all_params = ['zap_trace_span']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for (key, val) in six.iteritems(local_var_params['kwargs']):
if (key not in all_params):
raise TypeError(("Got an unexpected keyword argument '%s' to method get_health" % key))
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if ('zap_trace_span' in local_var_params):
header_params['Zap-Trace-Span'] = local_var_params['zap_trace_span']
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(['application/json'])
auth_settings = []
return self.api_client.call_api('/health', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='HealthCheck', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) | 4,269,785,489,427,170,000 | Get the health of an instance anytime during execution. Allow us to check if the instance is still healthy. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_health_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str zap_trace_span: OpenTracing span context
:return: HealthCheck
If the method is called asynchronously,
returns the request thread. | influxdb_client/service/health_service.py | get_health_with_http_info | rhajek/influxdb-client-python | python | def get_health_with_http_info(self, **kwargs):
'Get the health of an instance anytime during execution. Allow us to check if the instance is still healthy. # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.get_health_with_http_info(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str zap_trace_span: OpenTracing span context\n :return: HealthCheck\n If the method is called asynchronously,\n returns the request thread.\n '
local_var_params = locals()
all_params = ['zap_trace_span']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for (key, val) in six.iteritems(local_var_params['kwargs']):
if (key not in all_params):
raise TypeError(("Got an unexpected keyword argument '%s' to method get_health" % key))
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if ('zap_trace_span' in local_var_params):
header_params['Zap-Trace-Span'] = local_var_params['zap_trace_span']
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(['application/json'])
auth_settings = []
return self.api_client.call_api('/health', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='HealthCheck', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) |
def group_policies_gen(flat_policies, config):
'Filter policies using the following steps:\n 1. Apply prioritization among the policies that are sharing the same policy type and resource type\n 2. Remove redundant policies that may applicable across different types of resource\n 3. Filter policies based on type and return\n :param flat_policies: list of flat policies\n :return: Filtered policies\n '
filtered_policies = defaultdict(list)
policy_name = []
policies = [x for x in flat_policies if x[list(x.keys())[0]]['type']]
priority = config.get('policy_info', {}).get('prioritization_attributes', {})
aggregated_policies = dict()
for plc in policies:
attrs = [dot_notation(plc[list(plc.keys())[0]], dot_path) for key in priority.keys() for dot_path in priority[key]]
attrs_list = [(x if isinstance(x, list) else [x]) for x in attrs]
attributes = [(list_flatten(x) if isinstance(x, list) else x) for x in attrs_list]
for y in itertools.product(*attributes):
aggregated_policies.setdefault(y, [])
aggregated_policies[y].append(plc)
for key in aggregated_policies.keys():
prioritized_policy = aggregated_policies[key][0]
if (list(prioritized_policy.keys())[0] not in policy_name):
filtered_policies[prioritized_policy[list(prioritized_policy.keys())[0]]['type']].append(prioritized_policy)
policy_name.append(list(prioritized_policy.keys())[0])
return filtered_policies | 5,871,645,693,051,403,000 | Filter policies using the following steps:
1. Apply prioritization among the policies that are sharing the same policy type and resource type
2. Remove redundant policies that may applicable across different types of resource
3. Filter policies based on type and return
:param flat_policies: list of flat policies
:return: Filtered policies | osdf/adapters/policy/utils.py | group_policies_gen | onap/optf-osdf | python | def group_policies_gen(flat_policies, config):
'Filter policies using the following steps:\n 1. Apply prioritization among the policies that are sharing the same policy type and resource type\n 2. Remove redundant policies that may applicable across different types of resource\n 3. Filter policies based on type and return\n :param flat_policies: list of flat policies\n :return: Filtered policies\n '
filtered_policies = defaultdict(list)
policy_name = []
policies = [x for x in flat_policies if x[list(x.keys())[0]]['type']]
priority = config.get('policy_info', {}).get('prioritization_attributes', {})
aggregated_policies = dict()
for plc in policies:
attrs = [dot_notation(plc[list(plc.keys())[0]], dot_path) for key in priority.keys() for dot_path in priority[key]]
attrs_list = [(x if isinstance(x, list) else [x]) for x in attrs]
attributes = [(list_flatten(x) if isinstance(x, list) else x) for x in attrs_list]
for y in itertools.product(*attributes):
aggregated_policies.setdefault(y, [])
aggregated_policies[y].append(plc)
for key in aggregated_policies.keys():
prioritized_policy = aggregated_policies[key][0]
if (list(prioritized_policy.keys())[0] not in policy_name):
filtered_policies[prioritized_policy[list(prioritized_policy.keys())[0]]['type']].append(prioritized_policy)
policy_name.append(list(prioritized_policy.keys())[0])
return filtered_policies |
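
The docstring above describes bucketing policies by every combination of their prioritization attributes and keeping only the first policy per bucket. A standalone sketch of that bucket-and-keep-first idea with invented policy dicts (this is not the OSDF/ONAP policy model, only the `itertools.product` grouping pattern):

```python
# Standalone sketch of the bucket-and-keep-first idea; the policy dicts and
# attribute names are invented, not the OSDF/ONAP policy model.
import itertools
from collections import defaultdict

policies = [
    {"name": "p1", "type": "hpaPolicy", "resources": ["vG"]},
    {"name": "p2", "type": "hpaPolicy", "resources": ["vG"]},     # duplicate bucket
    {"name": "p3", "type": "hpaPolicy", "resources": ["vBRG"]},
]

buckets = defaultdict(list)
for plc in policies:
    # one bucket per combination of prioritization attributes
    for key in itertools.product([plc["type"]], plc["resources"]):
        buckets[key].append(plc)

kept = [group[0]["name"] for group in buckets.values()]
print(kept)  # ['p1', 'p3'] -- p2 is dropped as a lower-priority duplicate
```
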
def policy_name_as_regex(policy_name):
'Get the correct policy name as a regex\n (e.g. OOF_HAS_vCPE.cloudAttributePolicy ends up in policy as OOF_HAS_vCPE.Config_MS_cloudAttributePolicy.1.xml\n So, for now, we query it as OOF_HAS_vCPE..*aicAttributePolicy.*)\n :param policy_name: Example: OOF_HAS_vCPE.aicAttributePolicy\n :return: regexp for policy: Example: OOF_HAS_vCPE..*aicAttributePolicy.*\n '
p = policy_name.partition('.')
return ((((p[0] + p[1]) + '.*') + p[2]) + '.*') | 3,389,865,022,879,833,600 | Get the correct policy name as a regex
(e.g. OOF_HAS_vCPE.cloudAttributePolicy ends up in policy as OOF_HAS_vCPE.Config_MS_cloudAttributePolicy.1.xml
So, for now, we query it as OOF_HAS_vCPE..*aicAttributePolicy.*)
:param policy_name: Example: OOF_HAS_vCPE.aicAttributePolicy
:return: regexp for policy: Example: OOF_HAS_vCPE..*aicAttributePolicy.* | osdf/adapters/policy/utils.py | policy_name_as_regex | onap/optf-osdf | python | def policy_name_as_regex(policy_name):
'Get the correct policy name as a regex\n (e.g. OOF_HAS_vCPE.cloudAttributePolicy ends up in policy as OOF_HAS_vCPE.Config_MS_cloudAttributePolicy.1.xml\n So, for now, we query it as OOF_HAS_vCPE..*aicAttributePolicy.*)\n :param policy_name: Example: OOF_HAS_vCPE.aicAttributePolicy\n :return: regexp for policy: Example: OOF_HAS_vCPE..*aicAttributePolicy.*\n '
p = policy_name.partition('.')
return ((((p[0] + p[1]) + '.*') + p[2]) + '.*') |
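
A quick check of the conversion the docstring describes, assuming the function is importable from the path shown in this row:

```python
from osdf.adapters.policy.utils import policy_name_as_regex  # assumed import path

print(policy_name_as_regex("OOF_HAS_vCPE.aicAttributePolicy"))
# OOF_HAS_vCPE..*aicAttributePolicy.*
```
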
def retrieve_node(req_json, reference):
'\n Get the child node(s) from the dot-notation [reference] and parent [req_json].\n For placement and other requests, there are encoded JSONs inside the request or policy,\n so we need to expand it and then do a search over the parent plus expanded JSON.\n '
req_json_copy = copy.deepcopy(req_json)
info = dot_notation(req_json_copy, reference)
return (list_flatten(info) if isinstance(info, list) else info) | 8,100,758,929,228,773,000 | Get the child node(s) from the dot-notation [reference] and parent [req_json].
For placement and other requests, there are encoded JSONs inside the request or policy,
so we need to expand it and then do a search over the parent plus expanded JSON. | osdf/adapters/policy/utils.py | retrieve_node | onap/optf-osdf | python | def retrieve_node(req_json, reference):
'\n Get the child node(s) from the dot-notation [reference] and parent [req_json].\n For placement and other requests, there are encoded JSONs inside the request or policy,\n so we need to expand it and then do a search over the parent plus expanded JSON.\n '
req_json_copy = copy.deepcopy(req_json)
info = dot_notation(req_json_copy, reference)
return (list_flatten(info) if isinstance(info, list) else info) |
def read_pkl(path_pkl):
'\n Get a WaterFrame from a pickle file.\n\n Parameters\n ----------\n path_pkl: str\n Location of the pickle file.\n\n Returns\n -------\n wf_pkl: WaterFrame\n '
wf_pkl = WaterFrame()
pickle_dataset = pickle.load(open(path_pkl, 'rb'))
wf_pkl.data = pickle_dataset.get('data')
wf_pkl.vocabulary = pickle_dataset.get('vocabulary')
wf_pkl.metadata = pickle_dataset.get('metadata')
return wf_pkl | -1,526,109,206,076,102,700 | Get a WaterFrame from a pickle file.
Parameters
----------
path_pkl: str
Location of the pickle file.
Returns
-------
wf_pkl: WaterFrame | mooda/input/read_pkl.py | read_pkl | rbardaji/mooda | python | def read_pkl(path_pkl):
'\n Get a WaterFrame from a pickle file.\n\n Parameters\n ----------\n path_pkl: str\n Location of the pickle file.\n\n Returns\n -------\n wf_pkl: WaterFrame\n '
wf_pkl = WaterFrame()
pickle_dataset = pickle.load(open(path_pkl, 'rb'))
wf_pkl.data = pickle_dataset.get('data')
wf_pkl.vocabulary = pickle_dataset.get('vocabulary')
wf_pkl.metadata = pickle_dataset.get('metadata')
return wf_pkl |
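
`read_pkl` expects a pickle holding a dict with `data`, `vocabulary` and `metadata` keys and copies them onto a fresh WaterFrame. A hedged round-trip sketch; the file name and contents are invented, and the top-level import is an assumption:

```python
# Hedged round-trip sketch; file name and contents are made up, and read_pkl
# is assumed to be importable from the mooda package.
import pickle
import pandas as pd
from mooda import read_pkl  # assumed import path

dataset = {
    "data": pd.DataFrame({"TEMP": [12.1, 12.3]}),
    "vocabulary": {"TEMP": {"units": "degree_Celsius"}},
    "metadata": {"platform_code": "DEMO"},
}
with open("demo.pkl", "wb") as handle:
    pickle.dump(dataset, handle)

wf = read_pkl("demo.pkl")
print(wf.metadata["platform_code"])  # DEMO
```
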
@property
def color(self):
"\n The 'color' property is a color and may be specified as:\n - A hex string (e.g. '#ff0000')\n - An rgb/rgba string (e.g. 'rgb(255,0,0)')\n - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')\n - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')\n - A named CSS color:\n aliceblue, antiquewhite, aqua, aquamarine, azure,\n beige, bisque, black, blanchedalmond, blue,\n blueviolet, brown, burlywood, cadetblue,\n chartreuse, chocolate, coral, cornflowerblue,\n cornsilk, crimson, cyan, darkblue, darkcyan,\n darkgoldenrod, darkgray, darkgrey, darkgreen,\n darkkhaki, darkmagenta, darkolivegreen, darkorange,\n darkorchid, darkred, darksalmon, darkseagreen,\n darkslateblue, darkslategray, darkslategrey,\n darkturquoise, darkviolet, deeppink, deepskyblue,\n dimgray, dimgrey, dodgerblue, firebrick,\n floralwhite, forestgreen, fuchsia, gainsboro,\n ghostwhite, gold, goldenrod, gray, grey, green,\n greenyellow, honeydew, hotpink, indianred, indigo,\n ivory, khaki, lavender, lavenderblush, lawngreen,\n lemonchiffon, lightblue, lightcoral, lightcyan,\n lightgoldenrodyellow, lightgray, lightgrey,\n lightgreen, lightpink, lightsalmon, lightseagreen,\n lightskyblue, lightslategray, lightslategrey,\n lightsteelblue, lightyellow, lime, limegreen,\n linen, magenta, maroon, mediumaquamarine,\n mediumblue, mediumorchid, mediumpurple,\n mediumseagreen, mediumslateblue, mediumspringgreen,\n mediumturquoise, mediumvioletred, midnightblue,\n mintcream, mistyrose, moccasin, navajowhite, navy,\n oldlace, olive, olivedrab, orange, orangered,\n orchid, palegoldenrod, palegreen, paleturquoise,\n palevioletred, papayawhip, peachpuff, peru, pink,\n plum, powderblue, purple, red, rosybrown,\n royalblue, rebeccapurple, saddlebrown, salmon,\n sandybrown, seagreen, seashell, sienna, silver,\n skyblue, slateblue, slategray, slategrey, snow,\n springgreen, steelblue, tan, teal, thistle, tomato,\n turquoise, violet, wheat, white, whitesmoke,\n yellow, yellowgreen\n - A list or array of any of the above\n\n Returns\n -------\n str|numpy.ndarray\n "
return self['color'] | -9,075,663,790,309,021,000 | The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray | packages/python/plotly/plotly/graph_objs/scatter3d/_textfont.py | color | 1abner1/plotly.py | python | @property
def color(self):
"\n The 'color' property is a color and may be specified as:\n - A hex string (e.g. '#ff0000')\n - An rgb/rgba string (e.g. 'rgb(255,0,0)')\n - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')\n - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')\n - A named CSS color:\n aliceblue, antiquewhite, aqua, aquamarine, azure,\n beige, bisque, black, blanchedalmond, blue,\n blueviolet, brown, burlywood, cadetblue,\n chartreuse, chocolate, coral, cornflowerblue,\n cornsilk, crimson, cyan, darkblue, darkcyan,\n darkgoldenrod, darkgray, darkgrey, darkgreen,\n darkkhaki, darkmagenta, darkolivegreen, darkorange,\n darkorchid, darkred, darksalmon, darkseagreen,\n darkslateblue, darkslategray, darkslategrey,\n darkturquoise, darkviolet, deeppink, deepskyblue,\n dimgray, dimgrey, dodgerblue, firebrick,\n floralwhite, forestgreen, fuchsia, gainsboro,\n ghostwhite, gold, goldenrod, gray, grey, green,\n greenyellow, honeydew, hotpink, indianred, indigo,\n ivory, khaki, lavender, lavenderblush, lawngreen,\n lemonchiffon, lightblue, lightcoral, lightcyan,\n lightgoldenrodyellow, lightgray, lightgrey,\n lightgreen, lightpink, lightsalmon, lightseagreen,\n lightskyblue, lightslategray, lightslategrey,\n lightsteelblue, lightyellow, lime, limegreen,\n linen, magenta, maroon, mediumaquamarine,\n mediumblue, mediumorchid, mediumpurple,\n mediumseagreen, mediumslateblue, mediumspringgreen,\n mediumturquoise, mediumvioletred, midnightblue,\n mintcream, mistyrose, moccasin, navajowhite, navy,\n oldlace, olive, olivedrab, orange, orangered,\n orchid, palegoldenrod, palegreen, paleturquoise,\n palevioletred, papayawhip, peachpuff, peru, pink,\n plum, powderblue, purple, red, rosybrown,\n royalblue, rebeccapurple, saddlebrown, salmon,\n sandybrown, seagreen, seashell, sienna, silver,\n skyblue, slateblue, slategray, slategrey, snow,\n springgreen, steelblue, tan, teal, thistle, tomato,\n turquoise, violet, wheat, white, whitesmoke,\n yellow, yellowgreen\n - A list or array of any of the above\n\n Returns\n -------\n str|numpy.ndarray\n "
return self['color'] |
@property
def colorsrc(self):
"\n Sets the source reference on Chart Studio Cloud for color .\n \n The 'colorsrc' property must be specified as a string or\n as a plotly.grid_objs.Column object\n\n Returns\n -------\n str\n "
return self['colorsrc'] | 4,662,598,374,469,181,000 | Sets the source reference on Chart Studio Cloud for color .
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str | packages/python/plotly/plotly/graph_objs/scatter3d/_textfont.py | colorsrc | 1abner1/plotly.py | python | @property
def colorsrc(self):
"\n Sets the source reference on Chart Studio Cloud for color .\n \n The 'colorsrc' property must be specified as a string or\n as a plotly.grid_objs.Column object\n\n Returns\n -------\n str\n "
return self['colorsrc'] |
@property
def family(self):
'\n HTML font family - the typeface that will be applied by the web\n browser. The web browser will only be able to apply a font if\n it is available on the system which it operates. Provide\n multiple font families, separated by commas, to indicate the\n preference in which to apply fonts if they aren\'t available on\n the system. The Chart Studio Cloud (at https://chart-\n studio.plotly.com or on-premise) generates images on a server,\n where only a select number of fonts are installed and\n supported. These include "Arial", "Balto", "Courier New",\n "Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas\n One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans\n Narrow", "Raleway", "Times New Roman".\n \n The \'family\' property is a string and must be specified as:\n - A non-empty string\n\n Returns\n -------\n str\n '
return self['family'] | 3,791,649,582,837,001,000 | HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The Chart Studio Cloud (at https://chart-
studio.plotly.com or on-premise) generates images on a server,
where only a select number of fonts are installed and
supported. These include "Arial", "Balto", "Courier New",
"Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str | packages/python/plotly/plotly/graph_objs/scatter3d/_textfont.py | family | 1abner1/plotly.py | python | @property
def family(self):
'\n HTML font family - the typeface that will be applied by the web\n browser. The web browser will only be able to apply a font if\n it is available on the system which it operates. Provide\n multiple font families, separated by commas, to indicate the\n preference in which to apply fonts if they aren\'t available on\n the system. The Chart Studio Cloud (at https://chart-\n studio.plotly.com or on-premise) generates images on a server,\n where only a select number of fonts are installed and\n supported. These include "Arial", "Balto", "Courier New",\n "Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas\n One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans\n Narrow", "Raleway", "Times New Roman".\n \n The \'family\' property is a string and must be specified as:\n - A non-empty string\n\n Returns\n -------\n str\n '
return self['family'] |
@property
def size(self):
"\n The 'size' property is a number and may be specified as:\n - An int or float in the interval [1, inf]\n - A tuple, list, or one-dimensional numpy array of the above\n\n Returns\n -------\n int|float|numpy.ndarray\n "
return self['size'] | 6,887,128,696,685,480,000 | The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray | packages/python/plotly/plotly/graph_objs/scatter3d/_textfont.py | size | 1abner1/plotly.py | python | @property
def size(self):
"\n The 'size' property is a number and may be specified as:\n - An int or float in the interval [1, inf]\n - A tuple, list, or one-dimensional numpy array of the above\n\n Returns\n -------\n int|float|numpy.ndarray\n "
return self['size'] |
@property
def sizesrc(self):
"\n Sets the source reference on Chart Studio Cloud for size .\n \n The 'sizesrc' property must be specified as a string or\n as a plotly.grid_objs.Column object\n\n Returns\n -------\n str\n "
return self['sizesrc'] | 4,336,256,729,131,089,000 | Sets the source reference on Chart Studio Cloud for size .
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str | packages/python/plotly/plotly/graph_objs/scatter3d/_textfont.py | sizesrc | 1abner1/plotly.py | python | @property
def sizesrc(self):
"\n Sets the source reference on Chart Studio Cloud for size .\n \n The 'sizesrc' property must be specified as a string or\n as a plotly.grid_objs.Column object\n\n Returns\n -------\n str\n "
return self['sizesrc'] |
def __init__(self, arg=None, color=None, colorsrc=None, family=None, size=None, sizesrc=None, **kwargs):
'\n Construct a new Textfont object\n \n Parameters\n ----------\n arg\n dict of properties compatible with this constructor or\n an instance of\n :class:`plotly.graph_objs.scatter3d.Textfont`\n color\n\n colorsrc\n Sets the source reference on Chart Studio Cloud for\n color .\n family\n HTML font family - the typeface that will be applied by\n the web browser. The web browser will only be able to\n apply a font if it is available on the system which it\n operates. Provide multiple font families, separated by\n commas, to indicate the preference in which to apply\n fonts if they aren\'t available on the system. The Chart\n Studio Cloud (at https://chart-studio.plotly.com or on-\n premise) generates images on a server, where only a\n select number of fonts are installed and supported.\n These include "Arial", "Balto", "Courier New", "Droid\n Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas\n One", "Old Standard TT", "Open Sans", "Overpass", "PT\n Sans Narrow", "Raleway", "Times New Roman".\n size\n\n sizesrc\n Sets the source reference on Chart Studio Cloud for\n size .\n\n Returns\n -------\n Textfont\n '
super(Textfont, self).__init__('textfont')
if ('_parent' in kwargs):
self._parent = kwargs['_parent']
return
if (arg is None):
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError('The first argument to the plotly.graph_objs.scatter3d.Textfont \nconstructor must be a dict or \nan instance of :class:`plotly.graph_objs.scatter3d.Textfont`')
self._skip_invalid = kwargs.pop('skip_invalid', False)
self._validate = kwargs.pop('_validate', True)
_v = arg.pop('color', None)
_v = (color if (color is not None) else _v)
if (_v is not None):
self['color'] = _v
_v = arg.pop('colorsrc', None)
_v = (colorsrc if (colorsrc is not None) else _v)
if (_v is not None):
self['colorsrc'] = _v
_v = arg.pop('family', None)
_v = (family if (family is not None) else _v)
if (_v is not None):
self['family'] = _v
_v = arg.pop('size', None)
_v = (size if (size is not None) else _v)
if (_v is not None):
self['size'] = _v
_v = arg.pop('sizesrc', None)
_v = (sizesrc if (sizesrc is not None) else _v)
if (_v is not None):
self['sizesrc'] = _v
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False | 5,340,964,613,872,540,000 | Construct a new Textfont object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scatter3d.Textfont`
color
colorsrc
Sets the source reference on Chart Studio Cloud for
color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
sizesrc
Sets the source reference on Chart Studio Cloud for
size .
Returns
-------
Textfont | packages/python/plotly/plotly/graph_objs/scatter3d/_textfont.py | __init__ | 1abner1/plotly.py | python | def __init__(self, arg=None, color=None, colorsrc=None, family=None, size=None, sizesrc=None, **kwargs):
'\n Construct a new Textfont object\n \n Parameters\n ----------\n arg\n dict of properties compatible with this constructor or\n an instance of\n :class:`plotly.graph_objs.scatter3d.Textfont`\n color\n\n colorsrc\n Sets the source reference on Chart Studio Cloud for\n color .\n family\n HTML font family - the typeface that will be applied by\n the web browser. The web browser will only be able to\n apply a font if it is available on the system which it\n operates. Provide multiple font families, separated by\n commas, to indicate the preference in which to apply\n fonts if they aren\'t available on the system. The Chart\n Studio Cloud (at https://chart-studio.plotly.com or on-\n premise) generates images on a server, where only a\n select number of fonts are installed and supported.\n These include "Arial", "Balto", "Courier New", "Droid\n Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas\n One", "Old Standard TT", "Open Sans", "Overpass", "PT\n Sans Narrow", "Raleway", "Times New Roman".\n size\n\n sizesrc\n Sets the source reference on Chart Studio Cloud for\n size .\n\n Returns\n -------\n Textfont\n '
super(Textfont, self).__init__('textfont')
if ('_parent' in kwargs):
self._parent = kwargs['_parent']
return
if (arg is None):
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError('The first argument to the plotly.graph_objs.scatter3d.Textfont \nconstructor must be a dict or \nan instance of :class:`plotly.graph_objs.scatter3d.Textfont`')
self._skip_invalid = kwargs.pop('skip_invalid', False)
self._validate = kwargs.pop('_validate', True)
_v = arg.pop('color', None)
_v = (color if (color is not None) else _v)
if (_v is not None):
self['color'] = _v
_v = arg.pop('colorsrc', None)
_v = (colorsrc if (colorsrc is not None) else _v)
if (_v is not None):
self['colorsrc'] = _v
_v = arg.pop('family', None)
_v = (family if (family is not None) else _v)
if (_v is not None):
self['family'] = _v
_v = arg.pop('size', None)
_v = (size if (size is not None) else _v)
if (_v is not None):
self['size'] = _v
_v = arg.pop('sizesrc', None)
_v = (sizesrc if (sizesrc is not None) else _v)
if (_v is not None):
self['sizesrc'] = _v
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False |
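A hedged construction sketch for the Textfont object described above; the property values are illustrative and the printed dict is approximate (key order may differ).

from plotly.graph_objs.scatter3d import Textfont

# Keyword arguments and a plain dict are interchangeable, since the constructor accepts either.
tf = Textfont(family="Raleway, Arial", size=12, color="#444")
same = Textfont(dict(family="Raleway, Arial", size=12, color="#444"))

print(tf.to_plotly_json())                        # roughly {'color': '#444', 'family': 'Raleway, Arial', 'size': 12}
print(tf.to_plotly_json() == same.to_plotly_json())  # both construction styles yield the same JSON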
def testConvertHeadersValues(self):
'Tests the _ConvertHeadersValues function.'
plugin = msie_webcache.MsieWebCacheESEDBPlugin()
binary_value = b'HTTP/1.1 200 OK\r\nContent-Type: image/png\r\nX-Content-Type-Options: nosniff\r\nContent-Length: 2759\r\nX-XSS-Protection: 1; mode=block\r\nAlternate-Protocol: 80:quic\r\n\r\n'
expected_headers_value = '[HTTP/1.1 200 OK; Content-Type: image/png; X-Content-Type-Options: nosniff; Content-Length: 2759; X-XSS-Protection: 1; mode=block; Alternate-Protocol: 80:quic]'
headers_value = plugin._ConvertHeadersValues(binary_value)
self.assertEqual(headers_value, expected_headers_value) | 3,312,209,579,307,669,500 | Tests the _ConvertHeadersValues function. | tests/parsers/esedb_plugins/msie_webcache.py | testConvertHeadersValues | ColdSmoke627/plaso | python | def testConvertHeadersValues(self):
plugin = msie_webcache.MsieWebCacheESEDBPlugin()
binary_value = b'HTTP/1.1 200 OK\r\nContent-Type: image/png\r\nX-Content-Type-Options: nosniff\r\nContent-Length: 2759\r\nX-XSS-Protection: 1; mode=block\r\nAlternate-Protocol: 80:quic\r\n\r\n'
expected_headers_value = '[HTTP/1.1 200 OK; Content-Type: image/png; X-Content-Type-Options: nosniff; Content-Length: 2759; X-XSS-Protection: 1; mode=block; Alternate-Protocol: 80:quic]'
headers_value = plugin._ConvertHeadersValues(binary_value)
self.assertEqual(headers_value, expected_headers_value) |
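A standalone sketch of the conversion that the test above verifies: binary HTTP response headers are decoded, split on CRLF, and joined into a single bracketed string. This is an assumption-level re-implementation for illustration, not the actual plaso MsieWebCacheESEDBPlugin._ConvertHeadersValues code.

def convert_headers_values(binary_value):
    # Decode the raw bytes, drop empty lines left by the trailing CRLF CRLF,
    # and join the remaining header lines with '; ' inside square brackets.
    text = binary_value.decode('ascii', errors='replace')
    lines = [line for line in text.split('\r\n') if line]
    return '[{0:s}]'.format('; '.join(lines))

sample = b'HTTP/1.1 200 OK\r\nContent-Type: image/png\r\n\r\n'
print(convert_headers_values(sample))  # [HTTP/1.1 200 OK; Content-Type: image/png]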
def testProcessOnDatabaseWithPartitionsTable(self):
'Tests the Process function on database with a Partitions table.'
plugin = msie_webcache.MsieWebCacheESEDBPlugin()
storage_writer = self._ParseESEDBFileWithPlugin(['WebCacheV01.dat'], plugin)
self.assertEqual(storage_writer.number_of_events, 1372)
self.assertEqual(storage_writer.number_of_extraction_warnings, 0)
self.assertEqual(storage_writer.number_of_recovery_warnings, 0)
events = list(storage_writer.GetSortedEvents())
expected_event_values = {'container_identifier': 1, 'data_type': 'msie:webcache:containers', 'date_time': '2014-05-12 07:30:25.4861987', 'directory': 'C:\\Users\\test\\AppData\\Local\\Microsoft\\Windows\\INetCache\\IE\\', 'name': 'Content', 'set_identifier': 0, 'timestamp_desc': definitions.TIME_DESCRIPTION_LAST_ACCESS}
self.CheckEventValues(storage_writer, events[573], expected_event_values) | -4,337,249,863,847,990,300 | Tests the Process function on database with a Partitions table. | tests/parsers/esedb_plugins/msie_webcache.py | testProcessOnDatabaseWithPartitionsTable | ColdSmoke627/plaso | python | def testProcessOnDatabaseWithPartitionsTable(self):
plugin = msie_webcache.MsieWebCacheESEDBPlugin()
storage_writer = self._ParseESEDBFileWithPlugin(['WebCacheV01.dat'], plugin)
self.assertEqual(storage_writer.number_of_events, 1372)
self.assertEqual(storage_writer.number_of_extraction_warnings, 0)
self.assertEqual(storage_writer.number_of_recovery_warnings, 0)
events = list(storage_writer.GetSortedEvents())
expected_event_values = {'container_identifier': 1, 'data_type': 'msie:webcache:containers', 'date_time': '2014-05-12 07:30:25.4861987', 'directory': 'C:\\Users\\test\\AppData\\Local\\Microsoft\\Windows\\INetCache\\IE\\', 'name': 'Content', 'set_identifier': 0, 'timestamp_desc': definitions.TIME_DESCRIPTION_LAST_ACCESS}
self.CheckEventValues(storage_writer, events[573], expected_event_values) |
def testProcessOnDatabaseWithPartitionsExTable(self):
'Tests the Process function on database with a PartitionsEx table.'
plugin = msie_webcache.MsieWebCacheESEDBPlugin()
storage_writer = self._ParseESEDBFileWithPlugin(['PartitionsEx-WebCacheV01.dat'], plugin)
self.assertEqual(storage_writer.number_of_events, 4200)
self.assertEqual(storage_writer.number_of_extraction_warnings, 3)
self.assertEqual(storage_writer.number_of_recovery_warnings, 0)
events = list(storage_writer.GetSortedEvents())
expected_event_values = {'access_count': 5, 'cache_identifier': 0, 'cached_file_size': 726, 'cached_filename': 'b83d57c0[1].svg', 'container_identifier': 14, 'data_type': 'msie:webcache:container', 'date_time': '2019-03-20 17:22:14.0000000', 'entry_identifier': 63, 'sync_count': 0, 'response_headers': '[HTTP/1.1 200; content-length: 726; content-type: image/svg+xml; x-cache: TCP_HIT; x-msedge-ref: Ref A: 3CD5FCBC8EAD4E0A80FA41A62FBC8CCC Ref B: PRAEDGE0910 Ref C: 2019-12-16T20:55:28Z; date: Mon, 16 Dec 2019 20:55:28 GMT]', 'timestamp_desc': definitions.TIME_DESCRIPTION_MODIFICATION, 'url': 'https://www.bing.com/rs/3R/kD/ic/878ca0cd/b83d57c0.svg'}
self.CheckEventValues(storage_writer, events[100], expected_event_values) | 6,452,848,726,871,535,000 | Tests the Process function on database with a PartitionsEx table. | tests/parsers/esedb_plugins/msie_webcache.py | testProcessOnDatabaseWithPartitionsExTable | ColdSmoke627/plaso | python | def testProcessOnDatabaseWithPartitionsExTable(self):
plugin = msie_webcache.MsieWebCacheESEDBPlugin()
storage_writer = self._ParseESEDBFileWithPlugin(['PartitionsEx-WebCacheV01.dat'], plugin)
self.assertEqual(storage_writer.number_of_events, 4200)
self.assertEqual(storage_writer.number_of_extraction_warnings, 3)
self.assertEqual(storage_writer.number_of_recovery_warnings, 0)
events = list(storage_writer.GetSortedEvents())
expected_event_values = {'access_count': 5, 'cache_identifier': 0, 'cached_file_size': 726, 'cached_filename': 'b83d57c0[1].svg', 'container_identifier': 14, 'data_type': 'msie:webcache:container', 'date_time': '2019-03-20 17:22:14.0000000', 'entry_identifier': 63, 'sync_count': 0, 'response_headers': '[HTTP/1.1 200; content-length: 726; content-type: image/svg+xml; x-cache: TCP_HIT; x-msedge-ref: Ref A: 3CD5FCBC8EAD4E0A80FA41A62FBC8CCC Ref B: PRAEDGE0910 Ref C: 2019-12-16T20:55:28Z; date: Mon, 16 Dec 2019 20:55:28 GMT]', 'timestamp_desc': definitions.TIME_DESCRIPTION_MODIFICATION, 'url': 'https://www.bing.com/rs/3R/kD/ic/878ca0cd/b83d57c0.svg'}
self.CheckEventValues(storage_writer, events[100], expected_event_values) |
def m_step_gaussian_mixture(data, gamma):
'% Performs the M-step of the EM algorithm for gaussain mixture model.\n %\n % @param data : n x d matrix with rows as d dimensional data points\n % @param gamma : n x k matrix of resposibilities\n %\n % @return pi : k x 1 array\n % @return mu : k x d matrix of maximized cluster centers\n % @return sigma : cell array of maximized \n %\n '
n = np.shape(data)[0]
d = np.shape(data)[1]
k = np.shape(gamma)[1]
pi = np.zeros(k)
mu = np.zeros((k, d))
sigma = np.zeros((k, d, d))
for kk in range(k):
Nkk = np.sum(gamma[:, kk])
pi[kk] = (Nkk / n)
for dd in range(d):
mu[(kk, dd)] = (np.sum((gamma[:, kk] * data[:, dd]), axis=0) / Nkk)
for kk in range(k):
Nkk = np.sum(gamma[:, kk])
centered_data = (data - mu[kk, :])
for nn in range(n):
sigma[kk] += (gamma[(nn, kk)] * np.dot(centered_data[(nn, None)].T, centered_data[(nn, None)]))
sigma[kk] /= Nkk
return [mu, sigma, pi] | 333,318,272,719,222,850 | % Performs the M-step of the EM algorithm for gaussian mixture model.
%
% @param data : n x d matrix with rows as d dimensional data points
% @param gamma : n x k matrix of responsibilities
%
% @return pi : k x 1 array
% @return mu : k x d matrix of maximized cluster centers
% @return sigma : cell array of maximized covariance matrices
% | src/ML_Algorithms/ExpectationMaximization/m_step_gaussian_mixture.py | m_step_gaussian_mixture | leonardbj/AIMS | python | def m_step_gaussian_mixture(data, gamma):
'% Performs the M-step of the EM algorithm for gaussain mixture model.\n %\n % @param data : n x d matrix with rows as d dimensional data points\n % @param gamma : n x k matrix of resposibilities\n %\n % @return pi : k x 1 array\n % @return mu : k x d matrix of maximized cluster centers\n % @return sigma : cell array of maximized \n %\n '
n = np.shape(data)[0]
d = np.shape(data)[1]
k = np.shape(gamma)[1]
pi = np.zeros(k)
mu = np.zeros((k, d))
sigma = np.zeros((k, d, d))
for kk in range(k):
Nkk = np.sum(gamma[:, kk])
pi[kk] = (Nkk / n)
for dd in range(d):
mu[(kk, dd)] = (np.sum((gamma[:, kk] * data[:, dd]), axis=0) / Nkk)
for kk in range(k):
Nkk = np.sum(gamma[:, kk])
centered_data = (data - mu[kk, :])
for nn in range(n):
sigma[kk] += (gamma[(nn, kk)] * np.dot(centered_data[(nn, None)].T, centered_data[(nn, None)]))
sigma[kk] /= Nkk
return [mu, sigma, pi] |
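An illustrative driver for the M-step above, using synthetic data and random responsibilities; the array sizes and seed are assumptions chosen only to exercise the function.

import numpy as np

rng = np.random.default_rng(0)
data = rng.normal(size=(200, 2))             # n=200 points in d=2 dimensions
gamma = rng.random((200, 3))                 # unnormalised responsibilities for k=3 clusters
gamma /= gamma.sum(axis=1, keepdims=True)    # each row of gamma must sum to one

mu, sigma, pi = m_step_gaussian_mixture(data, gamma)
print(mu.shape, sigma.shape, pi.shape)       # (3, 2) (3, 2, 2) (3,)
print(np.isclose(pi.sum(), 1.0))             # mixing weights form a distribution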