code (string) | signature (string) | docstring (string) | loss_without_docstring (float64) | loss_with_docstring (float64) | factor (float64)
---|---|---|---|---|---
#Checking input validity
ut.check_range([Diam, ">0", "Diameter"],
[RatioVCOrifice, "0-1", "VC orifice ratio"])
if Height > 0:
return (RatioVCOrifice * area_circle(Diam).magnitude
* np.sqrt(2 * gravity.magnitude * Height))
else:
return 0 | def flow_orifice(Diam, Height, RatioVCOrifice) | Return the flow rate of the orifice. | 6.889877 | 6.806971 | 1.01218 |
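The `flow_orifice` entry above encodes the vena contracta orifice equation Q = ratio_vc * A * sqrt(2 g h). A minimal standalone sketch of the same relation, assuming SI units and g = 9.81 m/s^2 in place of the library's unit registry:

```python
import numpy as np

def flow_orifice_simple(diam, height, ratio_vc):
    """Q = ratio_vc * A * sqrt(2 * g * h); plain floats in SI units."""
    g = 9.80665                       # gravitational acceleration, m/s^2
    area = np.pi * diam**2 / 4        # orifice cross-sectional area
    return ratio_vc * area * np.sqrt(2 * g * height) if height > 0 else 0.0

# a 2.5 cm orifice under 0.4 m of head with a vena contracta ratio of 0.62
print(flow_orifice_simple(0.025, 0.4, 0.62))   # ~8.5e-4 m^3/s
```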
#Checking input validity
ut.check_range([RatioVCOrifice, "0-1", "VC orifice ratio"])
if Height > -Diam / 2:
flow_vert = integrate.quad(lambda z: (Diam * np.sin(np.arccos(z/(Diam/2)))
* np.sqrt(Height - z)
),
- Diam / 2,
min(Diam/2, Height))
return flow_vert[0] * RatioVCOrifice * np.sqrt(2 * gravity.magnitude)
else:
return 0 | def flow_orifice_vert(Diam, Height, RatioVCOrifice) | Return the vertical flow rate of the orifice. | 6.436082 | 6.390907 | 1.007069 |
#Checking input validity
ut.check_range([Diam, ">0", "Diameter"], [FlowRate, ">0", "Flow rate"],
[RatioVCOrifice, "0-1", "VC orifice ratio"])
return ((FlowRate
/ (RatioVCOrifice * area_circle(Diam).magnitude)
)**2
/ (2*gravity.magnitude)
) | def head_orifice(Diam, RatioVCOrifice, FlowRate) | Return the head of the orifice. | 7.856182 | 7.874472 | 0.997677 |
#Checking input validity
ut.check_range([Height, ">0", "Height"], [FlowRate, ">0", "Flow rate"],
[RatioVCOrifice, "0-1, >0", "VC orifice ratio"])
return FlowRate / (RatioVCOrifice * np.sqrt(2 * gravity.magnitude * Height)) | def area_orifice(Height, RatioVCOrifice, FlowRate) | Return the area of the orifice. | 6.98864 | 7.000474 | 0.99831 |
#Inputs do not need to be checked here because they are checked by
#functions this function calls.
return np.ceil(area_orifice(HeadLossOrifice, RatioVCOrifice,
FlowPlant).magnitude
/ area_circle(DiamOrifice).magnitude) | def num_orifices(FlowPlant, RatioVCOrifice, HeadLossOrifice, DiamOrifice) | Return the number of orifices. | 8.872486 | 8.15137 | 1.088466 |
#Checking input validity
ut.check_range([Diam, ">0", "Diameter"], [Nu, ">0", "Nu"])
return np.pi * Diam * RE_TRANSITION_PIPE * Nu / 4 | def flow_transition(Diam, Nu) | Return the flow rate for the laminar/turbulent transition.
This equation is used in some of the other equations for flow. | 13.56808 | 16.173342 | 0.838916 |
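`flow_transition` is simply Re = 4Q/(pi D nu) solved for Q at the transition Reynolds number. A standalone sketch, assuming the commonly used transition value of about 2100 (the actual constant lives in RE_TRANSITION_PIPE and is not shown in this table):

```python
import numpy as np

RE_TRANSITION = 2100    # assumed laminar/turbulent transition Reynolds number
NU_WATER = 1e-6         # kinematic viscosity of water near 20 C, m^2/s

def flow_transition_simple(diam, nu=NU_WATER):
    """Flow rate at which Re = 4*Q / (pi * D * nu) reaches the transition value."""
    return np.pi * diam * RE_TRANSITION * nu / 4

print(flow_transition_simple(0.05))   # ~8.2e-5 m^3/s for a 5 cm pipe
```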
#Checking input validity
ut.check_range([Diam, ">0", "Diameter"], [Length, ">0", "Length"],
[HeadLossFric, ">=0", "Headloss due to friction"],
[Nu, ">0", "Nu"])
return (np.pi*Diam**4) / (128*Nu) * gravity.magnitude * HeadLossFric / Length | def flow_hagen(Diam, HeadLossFric, Length, Nu) | Return the flow rate for laminar flow with only major losses. | 6.686502 | 6.628749 | 1.008713 |
#Checking input validity
ut.check_range([Diam, ">0", "Diameter"], [Length, ">0", "Length"],
[HeadLossFric, ">0", "Headloss due to friction"],
[Nu, ">0", "Nu"], [PipeRough, "0-1", "Pipe roughness"])
logterm = np.log10(PipeRough / (3.7 * Diam)
+ 2.51 * Nu * np.sqrt(Length / (2 * gravity.magnitude
* HeadLossFric
* Diam**3)
)
)
return ((-np.pi / np.sqrt(2)) * Diam**(5/2) * logterm
* np.sqrt(gravity.magnitude * HeadLossFric / Length)
) | def flow_swamee(Diam, HeadLossFric, Length, Nu, PipeRough) | Return the flow rate for turbulent flow with only major losses. | 5.823294 | 5.72441 | 1.017274 |
#Inputs do not need to be checked here because they are checked by
#functions this function calls.
FlowHagen = flow_hagen(Diam, HeadLossFric, Length, Nu).magnitude
if FlowHagen < flow_transition(Diam, Nu).magnitude:
return FlowHagen
else:
return flow_swamee(Diam, HeadLossFric, Length, Nu, PipeRough).magnitude | def flow_pipemajor(Diam, HeadLossFric, Length, Nu, PipeRough) | Return the flow rate with only major losses.
This function applies to both laminar and turbulent flows. | 5.835845 | 5.739833 | 1.016727 |
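`flow_pipemajor` picks between the Hagen-Poiseuille (laminar) and Swamee-Jain (turbulent) solutions by comparing the laminar answer against the transition flow. A self-contained sketch of that regime switch, using plain floats and an assumed transition Reynolds number of 2100:

```python
import numpy as np

G = 9.80665             # m/s^2
RE_TRANSITION = 2100    # assumed transition Reynolds number

def flow_hagen_simple(d, hf, length, nu):
    # Hagen-Poiseuille: laminar flow driven by friction head loss only
    return (np.pi * d**4 / (128 * nu)) * G * hf / length

def flow_swamee_simple(d, hf, length, nu, eps):
    # Swamee-Jain explicit solution for turbulent flow, major losses only
    logterm = np.log10(eps / (3.7 * d)
                       + 2.51 * nu * np.sqrt(length / (2 * G * hf * d**3)))
    return -np.pi / np.sqrt(2) * d**2.5 * logterm * np.sqrt(G * hf / length)

def flow_pipemajor_simple(d, hf, length, nu, eps):
    q_laminar = flow_hagen_simple(d, hf, length, nu)
    q_transition = np.pi * d * RE_TRANSITION * nu / 4
    if q_laminar < q_transition:
        return q_laminar                   # laminar result is self-consistent
    return flow_swamee_simple(d, hf, length, nu, eps)

print(flow_pipemajor_simple(0.05, 0.5, 30, 1e-6, 1e-4))   # turbulent branch, ~1.5e-3 m^3/s
```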
#Checking input validity - inputs not checked here are checked by
#functions this function calls.
ut.check_range([HeadLossExpans, ">=0", "Headloss due to expansion"],
[KMinor, ">0", "K minor"])
return (area_circle(Diam).magnitude * np.sqrt(2 * gravity.magnitude
* HeadLossExpans
/ KMinor)
) | def flow_pipeminor(Diam, HeadLossExpans, KMinor) | Return the flow rate with only minor losses.
This function applies to both laminar and turbulent flows. | 14.971054 | 15.551725 | 0.962662 |
#Inputs do not need to be checked here because they are checked by
#functions this function calls.
if KMinor == 0:
FlowRate = flow_pipemajor(Diam, HeadLoss, Length, Nu,
PipeRough).magnitude
else:
FlowRatePrev = 0
err = 1.0
FlowRate = min(flow_pipemajor(Diam, HeadLoss, Length,
Nu, PipeRough).magnitude,
flow_pipeminor(Diam, HeadLoss, KMinor).magnitude
)
while err > 0.01:
FlowRatePrev = FlowRate
HLFricNew = (HeadLoss * headloss_fric(FlowRate, Diam, Length,
Nu, PipeRough).magnitude
/ (headloss_fric(FlowRate, Diam, Length,
Nu, PipeRough).magnitude
+ headloss_exp(FlowRate, Diam, KMinor).magnitude
)
)
FlowRate = flow_pipemajor(Diam, HLFricNew, Length,
Nu, PipeRough).magnitude
if FlowRate == 0:
err = 0.0
else:
err = (abs(FlowRate - FlowRatePrev)
/ ((FlowRate + FlowRatePrev) / 2)
)
return FlowRate | def flow_pipe(Diam, HeadLoss, Length, Nu, PipeRough, KMinor) | Return the flow in a straight pipe.
This function works for both major and minor losses and
works whether the flow is laminar or turbulent. | 3.314393 | 3.289721 | 1.0075 |
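`flow_pipe` apportions the available head loss between friction and minor losses by fixed-point iteration: guess a flow, compute friction's share of the head loss at that flow, re-solve for the flow with that share, and stop once the relative change is below 1%. A simplified standalone sketch of the same loop, using a constant Darcy friction factor instead of the library's `headloss_fric`/`headloss_exp` calls (an assumption made for brevity):

```python
import numpy as np

G = 9.80665

def hl_fric(q, d, length, f=0.02):
    """Darcy-Weisbach friction head loss with a fixed friction factor (simplification)."""
    v = q / (np.pi * d**2 / 4)
    return f * (length / d) * v**2 / (2 * G)

def hl_minor(q, d, k_minor):
    """Minor (fitting/expansion) head loss."""
    v = q / (np.pi * d**2 / 4)
    return k_minor * v**2 / (2 * G)

def flow_major(d, hf, length, f=0.02):
    """Invert Darcy-Weisbach for the flow that produces friction head loss hf."""
    v = np.sqrt(2 * G * hf * d / (f * length))
    return v * np.pi * d**2 / 4

def flow_pipe_simple(d, headloss, length, k_minor, f=0.02):
    q = flow_major(d, headloss, length, f)     # first guess: all loss is friction
    err = 1.0
    while err > 0.01:
        q_prev = q
        hf_share = headloss * hl_fric(q, d, length, f) / (
            hl_fric(q, d, length, f) + hl_minor(q, d, k_minor))
        q = flow_major(d, hf_share, length, f)
        err = abs(q - q_prev) / ((q + q_prev) / 2)
    return q

print(flow_pipe_simple(0.05, 0.5, 30, k_minor=2))   # ~1.6e-3 m^3/s
```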
#Checking input validity
ut.check_range([FlowRate, ">0", "Flow rate"], [Length, ">0", "Length"],
[HeadLossFric, ">0", "Headloss due to friction"],
[Nu, ">0", "Nu"], [PipeRough, "0-1", "Pipe roughness"])
a = ((PipeRough ** 1.25)
* ((Length * FlowRate**2)
/ (gravity.magnitude * HeadLossFric)
)**4.75
)
b = (Nu * FlowRate**9.4
* (Length / (gravity.magnitude * HeadLossFric)) ** 5.2
)
return 0.66 * (a+b)**0.04 | def diam_swamee(FlowRate, HeadLossFric, Length, Nu, PipeRough) | Return the inner diameter of a pipe.
The Swamee Jain equation is dimensionally correct and returns the
inner diameter of a pipe given the flow rate and the head loss due
to shear on the pipe walls. The Swamee Jain equation does NOT take
minor losses into account. This equation ONLY applies to turbulent
flow. | 5.756063 | 5.844717 | 0.984832 |
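`diam_swamee` is the Swamee-Jain relation run in the sizing direction: given a flow and the friction head loss you are willing to spend, it returns the minimum inner diameter. A standalone sketch in plain SI floats:

```python
import numpy as np

G = 9.80665

def diam_swamee_simple(q, hf, length, nu, eps):
    """Swamee-Jain pipe sizing for turbulent flow with major losses only."""
    a = eps**1.25 * (length * q**2 / (G * hf))**4.75
    b = nu * q**9.4 * (length / (G * hf))**5.2
    return 0.66 * (a + b)**0.04

# the flow_pipemajor example above, run in reverse
print(diam_swamee_simple(q=0.0015, hf=0.5, length=30, nu=1e-6, eps=1e-4))   # ~0.05 m
```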
#Inputs do not need to be checked here because they are checked by
#functions this function calls.
DiamLaminar = diam_hagen(FlowRate, HeadLossFric, Length, Nu).magnitude
if re_pipe(FlowRate, DiamLaminar, Nu) <= RE_TRANSITION_PIPE:
return DiamLaminar
else:
return diam_swamee(FlowRate, HeadLossFric, Length,
Nu, PipeRough).magnitude | def diam_pipemajor(FlowRate, HeadLossFric, Length, Nu, PipeRough) | Return the pipe ID that would result in the given major losses.
This function applies to both laminar and turbulent flow. | 6.85738 | 6.19955 | 1.106109 |
#Checking input validity
ut.check_range([FlowRate, ">0", "Flow rate"], [KMinor, ">=0", "K minor"],
[HeadLossExpans, ">0", "Headloss due to expansion"])
return (np.sqrt(4 * FlowRate / np.pi)
* (KMinor / (2 * gravity.magnitude * HeadLossExpans)) ** (1/4)
) | def diam_pipeminor(FlowRate, HeadLossExpans, KMinor) | Return the pipe ID that would result in the given minor losses.
This function applies to both laminar and turbulent flow. | 7.497428 | 7.979239 | 0.939617 |
#Inputs do not need to be checked here because they are checked by
#functions this function calls.
if KMinor == 0:
Diam = diam_pipemajor(FlowRate, HeadLoss, Length, Nu,
PipeRough).magnitude
else:
Diam = max(diam_pipemajor(FlowRate, HeadLoss,
Length, Nu, PipeRough).magnitude,
diam_pipeminor(FlowRate, HeadLoss, KMinor).magnitude)
err = 1.00
while err > 0.001:
DiamPrev = Diam
HLFricNew = (HeadLoss * headloss_fric(FlowRate, Diam, Length,
Nu, PipeRough
).magnitude
/ (headloss_fric(FlowRate, Diam, Length,
Nu, PipeRough
).magnitude
+ headloss_exp(FlowRate,
Diam, KMinor
).magnitude
)
)
Diam = diam_pipemajor(FlowRate, HLFricNew, Length, Nu, PipeRough
).magnitude
err = abs(Diam - DiamPrev) / ((Diam + DiamPrev) / 2)
return Diam | def diam_pipe(FlowRate, HeadLoss, Length, Nu, PipeRough, KMinor) | Return the pipe ID that would result in the given total head loss.
This function applies to both laminar and turbulent flow and
incorporates both minor and major losses. | 3.662246 | 3.636807 | 1.006995 |
#Checking input validity
ut.check_range([FlowRate, ">0", "Flow rate"], [Height, ">0", "Height"])
return ((3 / 2) * FlowRate
/ (con.VC_ORIFICE_RATIO * np.sqrt(2 * gravity.magnitude) * Height ** (3 / 2))
) | def width_rect_weir(FlowRate, Height) | Return the width of a rectangular weir. | 12.518563 | 12.127968 | 1.032206 |
#Checking input validity
ut.check_range([FlowRate, ">0", "Flow rate"], [Width, ">0", "Width"])
return (((3/2) * FlowRate
/ (con.VC_ORIFICE_RATIO * np.sqrt(2 * gravity.magnitude) * Width)
) ** (2/3)) | def headloss_weir(FlowRate, Width) | Return the headloss of a weir. | 14.649833 | 14.36999 | 1.019474 |
#Checking input validity
ut.check_range([Height, ">0", "Height"], [Width, ">0", "Width"])
return ((2/3) * con.VC_ORIFICE_RATIO
* (np.sqrt(2*gravity.magnitude) * Height**(3/2))
* Width) | def flow_rect_weir(Height, Width) | Return the flow of a rectangular weir. | 15.758524 | 15.602004 | 1.010032 |
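`flow_rect_weir` and `headloss_weir` are inverses of each other (both come from the rectangular weir equation Q = (2/3) * ratio_vc * sqrt(2g) * W * H^(3/2)), so a round trip should recover the original head. A quick check, assuming a vena contracta ratio of 0.62 in place of the module constant:

```python
import numpy as np

G = 9.80665
RATIO_VC = 0.62   # assumed stand-in for con.VC_ORIFICE_RATIO

def flow_rect_weir_simple(height, width):
    return (2/3) * RATIO_VC * np.sqrt(2 * G) * height**1.5 * width

def headloss_weir_simple(flow, width):
    return ((3/2) * flow / (RATIO_VC * np.sqrt(2 * G) * width))**(2/3)

q = flow_rect_weir_simple(0.10, 0.5)       # flow over a 0.5 m wide weir at 0.10 m head
print(q, headloss_weir_simple(q, 0.5))     # recovered head is 0.10 m again
```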
#Checking input validity
ut.check_range([FlowRate, ">0", "Flow rate"], [Width, ">0", "Width"])
return (FlowRate / (Width * np.sqrt(gravity.magnitude))) ** (2/3) | def height_water_critical(FlowRate, Width) | Return the critical local water depth. | 10.179559 | 10.066585 | 1.011223 |
#Checking input validity
ut.check_range([HeightWaterCritical, ">0", "Critical height of water"])
return np.sqrt(gravity.magnitude * HeightWaterCritical) | def vel_horizontal(HeightWaterCritical) | Return the horizontal velocity. | 19.886089 | 18.799892 | 1.057777 |
#Checking input validity
ut.check_range([Length, ">0", "Length"], [Diam, ">0", "Diam"],
[Vel, ">0", "Velocity"], [Nu, ">0", "Nu"],
[Porosity, "0-1", "Porosity"])
return (K_KOZENY * Length * Nu
/ gravity.magnitude * (1-Porosity)**2
/ Porosity**3 * 36 * Vel
/ Diam ** 2) | def headloss_kozeny(Length, Diam, Vel, Porosity, Nu) | Return the Carmen Kozeny Sand Bed head loss. | 5.931184 | 5.927825 | 1.000567 |
if color != "":
inner_diameter = ID_colored_tube(color)
term1 = (R_pump * 2 * np.pi - k_nonlinear * inner_diameter) / u.rev
term2 = np.pi * (inner_diameter ** 2) / 4
return (term1 * term2).to(u.mL/u.rev) | def vol_per_rev_3_stop(color="", inner_diameter=0) | Return the volume per revolution of an Ismatec 6 roller pump
given the inner diameter (ID) of 3-stop tubing. The calculation is
interpolated from the table found at
http://www.ismatec.com/int_e/pumps/t_mini_s_ms_ca/tubing_msca2.htm.
Note:
1. Either input a string as the tubing color code or a number as the
tubing inner diameter. If both are given, the function will default to using
the color.
2. The calculation is interpolated for inner diameters between 0.13 and 3.17
mm. Accuracy is not guaranteed for tubes with smaller or larger diameters.
:param color: Color code of the Ismatec 3-stop tubing
:type color: string
:param inner_diameter: Inner diameter of the Ismatec 3-stop tubing. Results will be most accurate for inner diameters between 0.13 and 3.17 mm.
:type inner_diameter: float
:return: Volume per revolution output by a 6-roller pump through the 3-stop tubing (mL/rev)
:rtype: float
:Examples:
>>> from aguaclara.research.peristaltic_pump import vol_per_rev_3_stop
>>> from aguaclara.core.units import unit_registry as u
>>> round(vol_per_rev_3_stop(color="yellow-blue"), 6)
<Quantity(0.148846, 'milliliter / rev')>
>>> round(vol_per_rev_3_stop(inner_diameter=.20*u.mm), 6)
<Quantity(0.003116, 'milliliter / rev')> | 8.112025 | 8.808903 | 0.920889 |
tubing_data_path = os.path.join(os.path.dirname(__file__), "data",
"3_stop_tubing.txt")
df = pd.read_csv(tubing_data_path, delimiter='\t')
idx = df["Color"] == color
return df[idx]['Diameter (mm)'].values[0] * u.mm | def ID_colored_tube(color) | Look up the inner diameter of Ismatec 3-stop tubing given its color code.
:param color: Color of the 3-stop tubing
:type color: string
:returns: Inner diameter of the 3-stop tubing (mm)
:rtype: float
:Examples:
>>> from aguaclara.research.peristaltic_pump import ID_colored_tube
>>> from aguaclara.core.units import unit_registry as u
>>> ID_colored_tube("yellow-blue")
<Quantity(1.52, 'millimeter')>
>>> ID_colored_tube("orange-yellow")
<Quantity(0.51, 'millimeter')>
>>> ID_colored_tube("purple-white")
<Quantity(2.79, 'millimeter')> | 4.418539 | 3.458586 | 1.277556 |
tubing_data_path = os.path.join(os.path.dirname(__file__), "data",
"LS_tubing.txt")
df = pd.read_csv(tubing_data_path, delimiter='\t')
idx = df["Number"] == id_number
return df[idx]['Flow (mL/rev)'].values[0] * u.mL/u.turn | def vol_per_rev_LS(id_number) | Look up the volume per revolution output by a Masterflex L/S pump
through L/S tubing of the given ID number.
:param id_number: Identification number of the L/S tubing. Valid numbers are 13-18, 24, 35, and 36.
:type id_number: int
:return: Volume per revolution output by a Masterflex L/S pump through the L/S tubing
:rtype: float
:Examples:
>>> from aguaclara.research.peristaltic_pump import vol_per_rev_LS
>>> from aguaclara.core.units import unit_registry as u
>>> vol_per_rev_LS(13)
<Quantity(0.06, 'milliliter / turn')>
>>> vol_per_rev_LS(18)
<Quantity(3.8, 'milliliter / turn')> | 4.385682 | 4.158413 | 1.054653 |
return (vol_per_rev * rpm).to(u.mL/u.s) | def flow_rate(vol_per_rev, rpm) | Return the flow rate from a pump given the volume of fluid pumped per
revolution and the desired pump speed.
:param vol_per_rev: Volume of fluid output per revolution (dependent on pump and tubing)
:type vol_per_rev: float
:param rpm: Desired pump speed in revolutions per minute
:type rpm: float
:return: Flow rate of the pump (mL/s)
:rtype: float
:Examples:
>>> from aguaclara.research.peristaltic_pump import flow_rate
>>> from aguaclara.core.units import unit_registry as u
>>> flow_rate(3*u.mL/u.rev, 5*u.rev/u.min)
<Quantity(0.25, 'milliliter / second')> | 3.791711 | 6.755437 | 0.561283 |
if ent_pipe_id > exit_pipe_id:
print('k_value_expansion: Entrance pipe\'s inner diameter is larger '
'than exit pipe\'s inner diameter, using reduction instead.')
return k_value_reduction(ent_pipe_id, exit_pipe_id, q,
fitting_angle, rounded,
nu, pipe_rough)
f = pc.fric(q, ent_pipe_id, nu, pipe_rough) # Darcy friction factor.
re = pc.re_pipe(q, ent_pipe_id, nu) # Entrance pipe's Reynolds number.
fitting_type = _get_fitting_type(fitting_angle, rounded)
if fitting_type == 'square':
return _k_value_square_expansion(ent_pipe_id, exit_pipe_id, re, f)
elif fitting_type == 'tapered':
return _k_value_tapered_expansion(ent_pipe_id, exit_pipe_id, re, f)
elif fitting_type == 'rounded':
return _k_value_rounded_expansion(ent_pipe_id, exit_pipe_id, re)
elif fitting_type == 'ambiguous':
raise ValueError('The fitting is ambiguously both tapered and rounded. '
'Please set only either fitting_angle or rounded.') | def k_value_expansion(ent_pipe_id, exit_pipe_id, q,
fitting_angle=180, rounded=False,
nu=con.WATER_NU, pipe_rough=mats.PVC_PIPE_ROUGH) | Calculates the minor loss coefficient (k-value) of a square,
tapered, or rounded expansion in a pipe. Defaults to square.
To use tapered, set angle to something that isn't 180.
To use rounded, set rounded to True.
Parameters:
ent_pipe_id: Entrance pipe's inner diameter from which fluid flows.
exit_pipe_id: Exit pipe's inner diameter to which fluid flows.
q: Fluid's flow rate.
fitting_angle: Fitting angle. Default: square (180 degrees).
rounded: Rounded fitting. Default: square (False).
nu: Fluid's kinematic viscosity. Default: room
temperature water (1 * 10**-6 * m**2/s)
pipe_rough: Pipe roughness. Default: PVC pipe roughness
Returns:
k-value of expansion. | 2.812205 | 2.506133 | 1.122129 |
if ent_pipe_id < exit_pipe_id:
print('k_value_reduction: Entrance pipe\'s inner diameter is less than '
'exit pipe\'s inner diameter, using expansion instead.')
return k_value_expansion(ent_pipe_id, exit_pipe_id, q,
fitting_angle, rounded,
nu, pipe_rough)
f = pc.fric(q, ent_pipe_id, nu, pipe_rough) # Darcy friction factor.
re = pc.re_pipe(q, ent_pipe_id, nu) # Entrance pipe's Reynolds number.
fitting_type = _get_fitting_type(fitting_angle, rounded)
if fitting_type == 'square':
return _k_value_square_reduction(ent_pipe_id, exit_pipe_id, re, f)
elif fitting_type == 'tapered':
return _k_value_tapered_reduction(ent_pipe_id, exit_pipe_id, fitting_angle, re, f)
elif fitting_type == 'rounded':
return _k_value_rounded_reduction(ent_pipe_id, exit_pipe_id, re)
elif fitting_type == 'ambiguous':
raise ValueError('The fitting is ambiguously both tapered and rounded. '
'Please set only either fitting_angle or rounded.')
fitting_angle=180, rounded=False,
nu=con.WATER_NU, pipe_rough=mats.PVC_PIPE_ROUGH) | Calculates the minor loss coefficient (k-value) of a square,
tapered, or rounded reduction in a pipe. Defaults to square.
To use tapered, set angle to something that isn't 180.
To use rounded, set rounded to True.
Parameters:
ent_pipe_id: Entrance pipe's inner diameter from which fluid flows.
exit_pipe_id: Exit pipe's inner diameter to which fluid flows.
q: Fluid's flow rate.
fitting_angle: Fitting angle. Default: square (180 degrees).
rounded: Rounded fitting. Default: square (False).
nu: Fluid's kinematic viscosity. Default: room
temperature water (1 * 10**-6 * m**2/s)
pipe_rough: Pipe roughness. Default: PVC pipe roughness
Returns:
k-value of reduction. | 2.961194 | 2.599242 | 1.139253 |
if orifice_id > pipe_id:
raise ValueError('The orifice\'s inner diameter cannot be larger than '
'that of the entrance pipe.')
re = pc.re_pipe(q, pipe_id, nu) # Entrance pipe's Reynolds number.
orifice_type = _get_orifice_type(orifice_l, orifice_id)
if orifice_type == 'thin':
return _k_value_thin_sharp_orifice(pipe_id, orifice_id, re)
elif orifice_type == 'thick':
return _k_value_thick_orifice(pipe_id, orifice_id, orifice_l, re)
elif orifice_type == 'oversize':
return k_value_reduction(pipe_id, orifice_id, q) \
+ k_value_expansion(orifice_id, pipe_id, q) | def k_value_orifice(pipe_id, orifice_id, orifice_l, q,
nu=con.WATER_NU) | Calculates the minor loss coefficient of an orifice plate in a
pipe.
Parameters:
pipe_id: Entrance pipe's inner diameter from which fluid flows.
orifice_id: Orifice's inner diameter.
orifice_l: Orifice's length from start to end.
q: Fluid's flow rate.
nu: Fluid's kinematic viscosity. Default: room
temperature water (1 * 10**-6 * m**2/s)
Returns:
k-value at the orifice. | 3.079335 | 2.702642 | 1.13938 |
if re < 2500:
return (1.2 + (160 / re)) * ((ent_pipe_id / exit_pipe_id) ** 4)
else:
return (0.6 + 0.48 * f) * (ent_pipe_id / exit_pipe_id) ** 2\
* ((ent_pipe_id / exit_pipe_id) ** 2 - 1) | def _k_value_square_reduction(ent_pipe_id, exit_pipe_id, re, f) | Returns the minor loss coefficient for a square reducer.
Parameters:
ent_pipe_id: Entrance pipe's inner diameter.
exit_pipe_id: Exit pipe's inner diameter.
re: Reynold's number.
f: Darcy friction factor. | 3.335631 | 2.894159 | 1.152539 |
k_value_square_reduction = _k_value_square_reduction(ent_pipe_id, exit_pipe_id,
re, f)
if 45 < fitting_angle <= 180:
return k_value_square_reduction * np.sqrt(np.sin(fitting_angle / 2))
elif 0 < fitting_angle <= 45:
return k_value_square_reduction * 1.6 * np.sin(fitting_angle / 2)
else:
raise ValueError('k_value_tapered_reduction: The reducer angle ('
+ str(fitting_angle) + ') cannot be outside of [0,180].')
Parameters:
ent_pipe_id: Entrance pipe's inner diameter.
exit_pipe_id: Exit pipe's inner diameter.
fitting_angle: Fitting angle between entrance and exit pipes.
re: Reynold's number.
f: Darcy friction factor. | 3.036279 | 3.196558 | 0.949859 |
nu = pc.viscosity_kinematic(T)
K_minor = con.PIPE_ENTRANCE_K_MINOR + con.PIPE_EXIT_K_MINOR + con.EL90_K_MINOR
drain_ID = pc.diam_pipe(q_plant, depth_end, depth_end, nu, mat.PVC_PIPE_ROUGH, K_minor)
drain_ND = pipe.SDR_available_ND(drain_ID, SDR)
return pipe.OD(drain_ND).magnitude | def drain_OD(q_plant, T, depth_end, SDR) | Return the nominal diameter of the entrance tank drain pipe. Depth at the
end of the flocculator is used for headloss and length calculation inputs in
the diam_pipe calculation.
Parameters
----------
q_plant: float
Plant flow rate
T: float
Design temperature
depth_end: float
The depth of water at the end of the flocculator
SDR: float
Standard dimension ratio
Returns
-------
float
?
Examples
--------
>>> from aguaclara.play import*
?? | 11.070817 | 13.176544 | 0.840191 |
num_plates = np.ceil(np.sqrt(q_plant / (design.ent_tank.CENTER_PLATE_DIST.magnitude
* W_chan * design.ent_tank.CAPTURE_BOD_VEL.magnitude * np.sin(
design.ent_tank.PLATE_ANGLE.to(u.rad).magnitude))))
return num_plates | def num_plates_ET(q_plant, W_chan) | Return the number of plates in the entrance tank.
This number minimizes the total length of the plate settler unit.
Parameters
----------
q_plant: float
Plant flow rate
W_chan: float
Width of channel
Returns
-------
float
?
Examples
--------
>>> from aguaclara.play import*
>>> num_plates_ET(20*u.L/u.s,2*u.m)
1.0 | 8.202454 | 8.418616 | 0.974323 |
L_plate = ((q_plant / (num_plates_ET(q_plant, W_chan) * W_chan *
design.ent_tank.CAPTURE_BOD_VEL.magnitude * np.cos(
design.ent_tank.PLATE_ANGLE.to(u.rad).magnitude)))
- (design.ent_tank.PLATE_S.magnitude * np.tan(design.ent_tank.PLATE_ANGLE.to(u.rad).magnitude)))
return L_plate | def L_plate_ET(q_plant, W_chan) | Return the length of the plates in the entrance tank.
Parameters
----------
q_plant: float
Plant flow rate
W_chan: float
Width of channel
Returns
-------
float
?
Examples
--------
>>> from aguaclara.play import*
>>> L_plate_ET(20*u.L/u.s,2*u.m)
0.00194 | 6.211362 | 6.199881 | 1.001852 |
alpha0_carbonate = 1/(1+(K1_carbonate/invpH(pH)) *
(1+(K2_carbonate/invpH(pH))))
return alpha0_carbonate | def alpha0_carbonate(pH) | Calculate the fraction of total carbonates in carbonic acid form (H2CO3)
:param pH: pH of the system
:type pH: float
:return: Fraction of carbonates in carbonic acid form (H2CO3)
:rtype: float
:Examples:
>>> from aguaclara.research.environmental_processes_analysis import alpha0_carbonate
>>> round(alpha0_carbonate(10), 7)
<Quantity(0.00015, 'dimensionless')> | 5.225934 | 9.980538 | 0.523612 |
alpha1_carbonate = 1/((invpH(pH)/K1_carbonate) + 1 +
(K2_carbonate/invpH(pH)))
return alpha1_carbonate | def alpha1_carbonate(pH) | Calculate the fraction of total carbonates in bicarbonate form (HCO3-)
:param pH: pH of the system
:type pH: float
:return: Fraction of carbonates in bicarbonate form (HCO3-)
:rtype: float
:Examples:
>>> from aguaclara.research.environmental_processes_analysis import alpha1_carbonate
>>> round(alpha1_carbonate(10), 7)
<Quantity(0.639969, 'dimensionless')> | 5.692072 | 10.837541 | 0.525218 |
alpha2_carbonate = 1/(1+(invpH(pH)/K2_carbonate) *
(1+(invpH(pH)/K1_carbonate)))
return alpha2_carbonate | def alpha2_carbonate(pH) | Calculate the fraction of total carbonates in carbonate form (CO3-2)
:param pH: pH of the system
:type pH: float
:return: Fraction of carbonates in carbonate form (CO3-2)
:rtype: float
:Examples:
>>> from aguaclara.research.environmental_processes_analysis import alpha2_carbonate
>>> round(alpha2_carbonate(10), 7)
<Quantity(0.359881, 'dimensionless')> | 5.828625 | 10.252178 | 0.568525 |
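The three carbonate fractions above are the standard alpha distribution for a diprotic acid, so they must sum to 1 at any pH. A quick standalone check using typical textbook equilibrium constants (the module defines its own K1_carbonate/K2_carbonate values):

```python
# alpha0 + alpha1 + alpha2 = 1 at any pH for the carbonate system
K1 = 10**-6.3     # assumed first dissociation constant of carbonic acid
K2 = 10**-10.3    # assumed second dissociation constant

def carbonate_alphas(pH):
    h = 10**-pH
    a0 = 1 / (1 + (K1 / h) * (1 + K2 / h))     # H2CO3* fraction
    a1 = 1 / ((h / K1) + 1 + (K2 / h))         # HCO3- fraction
    a2 = 1 / (1 + (h / K2) * (1 + h / K1))     # CO3-2 fraction
    return a0, a1, a2

print(sum(carbonate_alphas(7.0)))    # 1.0
print(sum(carbonate_alphas(10.0)))   # 1.0
```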
return (total_carbonates * (u.eq/u.mol * alpha1_carbonate(pH) +
2 * u.eq/u.mol * alpha2_carbonate(pH)) +
1 * u.eq/u.mol * Kw/invpH(pH) - 1 * u.eq/u.mol * invpH(pH)) | def ANC_closed(pH, total_carbonates) | Calculate the acid neutralizing capacity (ANC) under a closed system
in which no carbonates are exchanged with the atmosphere during the
experiment. Based on pH and total carbonates in the system.
:param pH: pH of the system
:type pH: float
:param total_carbonates: Total carbonate concentration in the system (mole/L)
:type total_carbonates: float
:return: The acid neutralizing capacity of the closed system (eq/L)
:rtype: float
:Examples:
>>> from aguaclara.research.environmental_processes_analysis import ANC_closed
>>> from aguaclara.core.units import unit_registry as u
>>> round(ANC_closed(10, 1*u.mol/u.L), 7)
<Quantity(1.359831, 'equivalent / liter')> | 5.367537 | 6.009799 | 0.893131 |
#return the list of files in the directory
filenames = os.listdir(dirpath)
#extract the flowrates from the filenames and apply units
airflows = ((np.array([i.split('.', 1)[0] for i in filenames])).astype(np.float32))
#sort airflows and filenames so that they are in ascending order of flow rates
idx = np.argsort(airflows)
airflows = (np.array(airflows)[idx])*u.umole/u.s
filenames = np.array(filenames)[idx]
filepaths = [os.path.join(dirpath, i) for i in filenames]
#DO_data is a list of numpy arrays. Thus each of the numpy data arrays can have different lengths to accommodate short and long experiments
# cycle through all of the files and extract the column of data with oxygen concentrations and the times
DO_data=[column_of_data(i,0,DO_column,-1,'mg/L') for i in filepaths]
time_data=[(column_of_time(i,0,-1)).to(u.s) for i in filepaths]
aeration_collection = collections.namedtuple('aeration_results','filepaths airflows DO_data time_data')
aeration_results = aeration_collection(filepaths, airflows, DO_data, time_data)
return aeration_results | def aeration_data(DO_column, dirpath) | Extract the data from folder containing tab delimited
files of aeration data. The file must be the original tab delimited file.
All text strings below the header must be removed from these files.
The file names must be the air flow rates with units of micromoles/s.
An example file name would be "300.xls" where 300 is the flow rate in
micromoles/s. The function opens a file dialog for the user to select
the directory containing the data.
:param DO_column: Index of the column that contains the dissolved oxygen concentration data.
:type DO_column: int
:param dirpath: Path to the directory containing aeration data you want to analyze
:type dirpath: string
:return: collection of
* **filepaths** (*string list*) - All file paths in the directory sorted by flow rate
* **airflows** (*numpy.array*) - Sorted array of air flow rates with units of micromole/s
* **DO_data** (*numpy.array list*) - Sorted list of Numpy arrays. Thus each of the numpy data arrays can have different lengths to accommodate short and long experiments
* **time_data** (*numpy.array list*) - Sorted list of Numpy arrays containing the times with units of seconds | 4.604628 | 3.22583 | 1.427424 |
fraction_O2 = 0.21
P_O2 = P_air * fraction_O2
return ((P_O2.to(u.atm).magnitude) *
u.mg/u.L*np.exp(1727 / temp.to(u.K).magnitude - 2.105)) | def O2_sat(P_air, temp) | Calculate saturated oxygen concentration in mg/L for 278 K < T < 318 K
:param P_air: Air pressure with appropriate units
:type P_air: float
:param temp: Water temperature with appropriate units
:type temp: float
:return: Saturated oxygen concentration in mg/L
:rtype: float
:Examples:
>>> from aguaclara.research.environmental_processes_analysis import O2_sat
>>> from aguaclara.core.units import unit_registry as u
>>> round(O2_sat(1*u.atm , 300*u.kelvin), 7)
<Quantity(8.0931572, 'milligram / liter')> | 5.660686 | 7.004483 | 0.808152 |
df = pd.read_csv(data_file_path, delimiter='\t', header=5)
V_t = np.array(pd.to_numeric(df.iloc[0:, 0]))*u.mL
pH = np.array(pd.to_numeric(df.iloc[0:, 1]))
df = pd.read_csv(data_file_path, delimiter='\t', header=None, nrows=5)
V_S = pd.to_numeric(df.iloc[0, 1])*u.mL
N_t = pd.to_numeric(df.iloc[1, 1])*u.mole/u.L
V_eq = pd.to_numeric(df.iloc[2, 1])*u.mL
ANC_sample = pd.to_numeric(df.iloc[3, 1])*u.mole/u.L
Gran_collection = collections.namedtuple('Gran_results', 'V_titrant ph_data V_sample Normality_titrant V_equivalent ANC')
Gran = Gran_collection(V_titrant=V_t, ph_data=pH, V_sample=V_S,
Normality_titrant=N_t, V_equivalent=V_eq,
ANC=ANC_sample)
return Gran | def Gran(data_file_path) | Extract the data from a ProCoDA Gran plot file. The file must be the original tab delimited file.
:param data_file_path: The path to the file. If the file is in the working directory, then the file name is sufficient.
:return: collection of
* **V_titrant** (*float*) - Volume of titrant in mL
* **ph_data** (*numpy.array*) - pH of the sample
* **V_sample** (*float*) - Volume of the original sample that was titrated in mL
* **Normality_titrant** (*float*) - Normality of the acid used to titrate the sample in mole/L
* **V_equivalent** (*float*) - Volume of acid required to consume all of the ANC in mL
* **ANC** (*float*) - Acid Neutralizing Capacity of the sample in mole/L | 2.949741 | 1.949054 | 1.513422 |
return C_influent * (1-np.exp(-t)) + C_initial*np.exp(-t) | def CMFR(t, C_initial, C_influent) | Calculate the effluent concentration of a conservative (non-reacting)
material with continuous input to a completely mixed flow reactor.
Note: time t=0 is the time at which the material starts to flow into the
reactor.
:param C_initial: The concentration in the CMFR at time t=0.
:type C_initial: float
:param C_influent: The concentration entering the CMFR.
:type C_influent: float
:param t: The time(s) at which to calculate the effluent concentration. Time can be made dimensionless by dividing by the residence time of the CMFR.
:type t: float or numpy.array
:return: Effluent concentration
:rtype: float
:Examples:
>>> from aguaclara.research.environmental_processes_analysis import CMFR
>>> from aguaclara.core.units import unit_registry as u
>>> round(CMFR(0.1, 0*u.mg/u.L, 10*u.mg/u.L), 7)
<Quantity(0.9516258, 'milligram / liter')>
>>> round(CMFR(0.9, 5*u.mg/u.L, 10*u.mg/u.L), 7)
<Quantity(7.9671517, 'milligram / liter')> | 3.154002 | 6.300292 | 0.500612 |
return (N**N)/special.gamma(N) * (t**(N-1))*np.exp(-N*t) | def E_CMFR_N(t, N) | Calculate a dimensionless measure of the output tracer concentration
from a spike input to a series of completely mixed flow reactors.
:param t: The time(s) at which to calculate the effluent concentration. Time can be made dimensionless by dividing by the residence time of the CMFR.
:type t: float or numpy.array
:param N: The number of completely mixed flow reactors (CMFRS) in series. Must be greater than 1.
:type N: int
:return: Dimensionless measure of the output tracer concentration (concentration * volume of 1 CMFR) / (mass of tracer)
:rtype: float
:Examples:
>>> from aguaclara.research.environmental_processes_analysis import E_CMFR_N
>>> round(E_CMFR_N(0.5, 3), 7)
0.7530643
>>> round(E_CMFR_N(0.1, 1), 7)
0.9048374 | 6.479872 | 14.56218 | 0.44498 |
# replace any times at zero with a number VERY close to zero to avoid
# divide by zero errors
if isinstance(t, (list, np.ndarray)):
t = np.array(t, dtype=float)
t[t == 0] = 10**(-10)
return (Pe/(4*np.pi*t))**(0.5)*np.exp((-Pe*((1-t)**2))/(4*t)) | def E_Advective_Dispersion(t, Pe) | Calculate a dimensionless measure of the output tracer concentration from
a spike input to reactor with advection and dispersion.
:param t: The time(s) at which to calculate the effluent concentration. Time can be made dimensionless by dividing by the residence time of the CMFR.
:type t: float or numpy.array
:param Pe: The ratio of advection to dispersion ((mean fluid velocity)/(Dispersion*flow path length))
:type Pe: float
:return: dimensionless measure of the output tracer concentration (concentration * volume of reactor) / (mass of tracer)
:rtype: float
:Examples:
>>> from aguaclara.research.environmental_processes_analysis import E_Advective_Dispersion
>>> round(E_Advective_Dispersion(0.5, 5), 7)
0.4774864 | 6.360907 | 10.044112 | 0.633297 |
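Because `E_Advective_Dispersion` is an exit-age distribution, its integral over dimensionless time should be 1 for any Peclet number. A standalone numerical check of that normalization (re-implementing the formula with plain NumPy so it runs without the module):

```python
import numpy as np

def e_adv_disp(t, pe):
    """Dimensionless exit-age distribution for an advection-dispersion reactor."""
    t = np.asarray(t, dtype=float)
    t = np.where(t == 0, 1e-10, t)        # avoid division by zero at t = 0
    return np.sqrt(pe / (4 * np.pi * t)) * np.exp(-pe * (1 - t)**2 / (4 * t))

t = np.linspace(0, 5, 2000)               # dimensionless time t/theta
dt = t[1] - t[0]
for pe in (5, 20, 100):
    print(pe, (e_adv_disp(t, pe) * dt).sum())   # each sum is close to 1
```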
return C_bar*E_CMFR_N(t_seconds/t_bar, N) | def Tracer_CMFR_N(t_seconds, t_bar, C_bar, N) | Used by Solver_CMFR_N. All inputs and outputs are unitless. This is
The model function, f(x, ...). It takes the independent variable as the
first argument and the parameters to fit as separate remaining arguments.
:param t_seconds: List of times
:type t_seconds: float list
:param t_bar: Average time spent in the reactor
:type t_bar: float
:param C_bar: Average concentration (mass of tracer)/(volume of the reactor)
:type C_bar: float
:param N: Number of completely mixed flow reactors (CMFRs) in series, must be greater than 1
:type N: int
:return: The model concentration as a function of time
:rtype: float list
:Examples:
>>> from aguaclara.research.environmental_processes_analysis import Tracer_CMFR_N
>>> from aguaclara.core.units import unit_registry as u
>>> Tracer_CMFR_N([1, 2, 3, 4, 5]*u.s, 5*u.s, 10*u.mg/u.L, 3)
<Quantity([2.96358283 6.50579498 8.03352597 7.83803116 6.72125423], 'milligram / liter')> | 4.69345 | 10.316473 | 0.454947 |
C_unitless = C_data.magnitude
C_units = str(C_bar_guess.units)
t_seconds = (t_data.to(u.s)).magnitude
# assume that a guess of 1 reactor in series is close enough to get a solution
p0 = [theta_guess.to(u.s).magnitude, C_bar_guess.magnitude,1]
popt, pcov = curve_fit(Tracer_CMFR_N, t_seconds, C_unitless, p0)
Solver_theta = popt[0]*u.s
Solver_C_bar = popt[1]*u(C_units)
Solver_N = popt[2]
Reactor_results = collections.namedtuple('Reactor_results','theta C_bar N')
CMFR = Reactor_results(theta=Solver_theta, C_bar=Solver_C_bar, N=Solver_N)
return CMFR | def Solver_CMFR_N(t_data, C_data, theta_guess, C_bar_guess) | Use non-linear least squares to fit the function
Tracer_CMFR_N(t_seconds, t_bar, C_bar, N) to reactor data.
:param t_data: Array of times with units
:type t_data: float list
:param C_data: Array of tracer concentration data with units
:type C_data: float list
:param theta_guess: Estimate of time spent in one CMFR with units.
:type theta_guess: float
:param C_bar_guess: Estimate of average concentration with units ((mass of tracer)/(volume of one CMFR))
:type C_bar_guess: float
:return: tuple of
* **theta** (*float*)- Residence time in seconds
* **C_bar** (*float*) - Average concentration with same units as C_bar_guess
* **N** (*float*)- Number of CMFRS in series that best fit the data | 3.499706 | 3.2896 | 1.06387 |
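`Solver_CMFR_N` strips the units, then hands the N-CMFRs-in-series model to `scipy.optimize.curve_fit`. A self-contained sketch of that pattern on synthetic data (the numbers below are hypothetical, chosen only for illustration):

```python
import numpy as np
from scipy import special
from scipy.optimize import curve_fit

def tracer_cmfr_n(t_seconds, t_bar, c_bar, n):
    """Unitless N-CMFRs-in-series tracer response, in the shape curve_fit expects."""
    t = t_seconds / t_bar
    return c_bar * (n**n) / special.gamma(n) * t**(n - 1) * np.exp(-n * t)

# synthetic "measured" data: theta = 600 s, C_bar = 8 mg/L, N = 3 reactors
rng = np.random.default_rng(0)
t_data = np.linspace(30, 3000, 100)
c_data = tracer_cmfr_n(t_data, 600, 8, 3) + rng.normal(0, 0.05, t_data.size)

popt, _ = curve_fit(tracer_cmfr_n, t_data, c_data, p0=[500, 5, 1],
                    bounds=(0.01, np.inf))
print(popt)   # roughly [600, 8, 3]
```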
return C_bar*E_Advective_Dispersion(t_seconds/t_bar, Pe) | def Tracer_AD_Pe(t_seconds, t_bar, C_bar, Pe) | Used by Solver_AD_Pe. All inputs and outputs are unitless. This is the
model function, f(x, ...). It takes the independent variable as the
first argument and the parameters to fit as separate remaining arguments.
:param t_seconds: List of times
:type t_seconds: float list
:param t_bar: Average time spent in the reactor
:type t_bar: float
:param C_bar: Average concentration ((mass of tracer)/(volume of the reactor))
:type C_bar: float
:param Pe: The Peclet number for the reactor.
:type Pe: float
:return: The model concentration as a function of time
:rtype: float list
:Examples:
>>> from aguaclara.research.environmental_processes_analysis import Tracer_AD_Pe
>>> from aguaclara.core.units import unit_registry as u
>>> Tracer_AD_Pe([1, 2, 3, 4, 5]*u.s, 5*u.s, 10*u.mg/u.L, 5)
<Quantity([0.25833732 3.23793989 5.8349833 6.62508831 6.30783131], 'milligram / liter')> | 9.450745 | 17.108545 | 0.552399 |
#remove time=0 data to eliminate divide by zero error
t_data = t_data[1:-1]
C_data = C_data[1:-1]
C_unitless = C_data.magnitude
C_units = str(C_bar_guess.units)
t_seconds = (t_data.to(u.s)).magnitude
# assume that a guess of 1 reactor in series is close enough to get a solution
p0 = [theta_guess.to(u.s).magnitude, C_bar_guess.magnitude,5]
popt, pcov = curve_fit(Tracer_AD_Pe, t_seconds, C_unitless, p0, bounds=(0.01,np.inf))
Solver_theta = popt[0]*u.s
Solver_C_bar = popt[1]*u(C_units)
Solver_Pe = popt[2]
Reactor_results = collections.namedtuple('Reactor_results', 'theta C_bar Pe')
AD = Reactor_results(theta=Solver_theta, C_bar=Solver_C_bar, Pe=Solver_Pe)
return AD | def Solver_AD_Pe(t_data, C_data, theta_guess, C_bar_guess) | Use non-linear least squares to fit the function
Tracer_AD_Pe(t_seconds, t_bar, C_bar, Pe) to reactor data.
:param t_data: Array of times with units
:type t_data: float list
:param C_data: Array of tracer concentration data with units
:type C_data: float list
:param theta_guess: Estimate of time spent in one CMFR with units.
:type theta_guess: float
:param C_bar_guess: Estimate of average concentration with units ((mass of tracer)/(volume of one CMFR))
:type C_bar_guess: float
:return: tuple of
* **theta** (*float*)- Residence time in seconds
* **C_bar** (*float*) - Average concentration with same units as C_bar_guess
* **Pe** (*float*) - Peclet number that best fits the data | 3.608969 | 3.439743 | 1.049197 |
u.default_format = '.' + str(n) + 'g'
pd.options.display.float_format = ('{:,.' + str(n) + '}').format | def set_sig_figs(n=4) | Set the number of significant figures used to print Pint, Pandas, and
NumPy quantities.
Args:
n (int): Number of significant figures to display. | 4.952579 | 6.414815 | 0.772053 |
data = data_from_dates(path, dates)
first_time_column = pd.to_numeric(data[0].iloc[:, 0])
start = max(day_fraction(start_time), first_time_column[0])
start_idx = time_column_index(start, first_time_column)
end_idx = time_column_index(day_fraction(end_time),
pd.to_numeric(data[-1].iloc[:, 0])) + 1
if isinstance(columns, int):
return column_start_to_end(data, columns, start_idx, end_idx)
else:
result = []
for c in columns:
result.append(column_start_to_end(data, c, start_idx, end_idx))
return result | def get_data_by_time(path, columns, dates, start_time='00:00', end_time='23:59') | Extract columns of data from a ProCoDA datalog based on date(s) and time(s)
Note: Column 0 is time. The first data column is column 1.
:param path: The path to the folder containing the ProCoDA data file(s)
:type path: string
:param columns: A single index of a column OR a list of indices of columns of data to extract.
:type columns: int or int list
:param dates: A single date or list of dates for which data was recorded, formatted "M-D-YYYY"
:type dates: string or string list
:param start_time: Starting time of data to extract, formatted 'HH:MM' (24-hour time)
:type start_time: string, optional
:param end_time: Ending time of data to extract, formatted 'HH:MM' (24-hour time)
:type end_time: string, optional
:return: a list containing the single column of data to extract, OR a list of lists containing the columns to extract, in order of the indices given in the columns variable
:rtype: list or list list
:Examples:
.. code-block:: python
data = get_data_by_time(path='/Users/.../ProCoDA Data/', columns=4, dates=['6-14-2018', '6-15-2018'], start_time='12:20', end_time='10:50')
data = get_data_by_time(path='/Users/.../ProCoDA Data/', columns=[0,4], dates='6-14-2018', start_time='12:20', end_time='23:59')
data = get_data_by_time(path='/Users/.../ProCoDA Data/', columns=[0,3,4], dates='6-14-2018') | 3.083707 | 3.563714 | 0.865307 |
has_text = data.iloc[:, 0].astype(str).str.contains('(?!e-)[a-zA-Z]')
text_rows = list(has_text.index[has_text])
return data.drop(text_rows) | def remove_notes(data) | Omit notes from a DataFrame object, where notes are identified as rows with non-numerical entries in the first column.
:param data: DataFrame object to remove notes from
:type data: Pandas.DataFrame
:return: DataFrame object with no notes
:rtype: Pandas.DataFrame | 5.630704 | 5.807158 | 0.969614 |
hour = int(time.split(":")[0])
minute = int(time.split(":")[1])
return hour/24 + minute/1440 | def day_fraction(time) | Convert a 24-hour time to a fraction of a day.
For example, midnight corresponds to 0.0, and noon to 0.5.
:param time: Time in the form of 'HH:MM' (24-hour time)
:type time: string
:return: A day fraction
:rtype: float
:Examples:
.. code-block:: python
day_fraction("18:30") | 2.464971 | 3.242226 | 0.760271 |
interval = time_column[1]-time_column[0]
return int(round((time - time_column[0])/interval + .5)) | def time_column_index(time, time_column) | Return the index of lowest time in the column of times that is greater
than or equal to the given time.
:param time: the time to index from the column of time; a day fraction
:type time: float
:param time_column: a list of times (in day fractions), must be increasing and equally spaced
:type time_column: float list
:return: approximate index of the time from the column of times
:rtype: int | 4.17466 | 4.315191 | 0.967433 |
if path[-1] != os.path.sep:
path += os.path.sep
if not isinstance(dates, list):
dates = [dates]
data = []
for d in dates:
filepath = path + 'datalog ' + d + '.xls'
data.append(remove_notes(pd.read_csv(filepath, delimiter='\t')))
return data | def data_from_dates(path, dates) | Return list DataFrames representing the ProCoDA datalogs stored in
the given path and recorded on the given dates.
:param path: The path to the folder containing the ProCoDA data file(s)
:type path: string
:param dates: A single date or list of dates for which data was recorded, formatted "M-D-YYYY"
:type dates: string or string list
:return: a list DataFrame objects representing the ProCoDA datalogs corresponding with the given dates
:rtype: pandas.DataFrame list | 3.505081 | 3.562594 | 0.983856 |
if len(data) == 1:
result = list(pd.to_numeric(data[0].iloc[start_idx:end_idx, column]))
else:
result = list(pd.to_numeric(data[0].iloc[start_idx:, column]))
for i in range(1, len(data)-1):
data[i].iloc[0, 0] = 0
result += list(pd.to_numeric(data[i].iloc[:, column]) +
(i if column == 0 else 0))
data[-1].iloc[0, 0] = 0
result += list(pd.to_numeric(data[-1].iloc[:end_idx, column]) +
(len(data)-1 if column == 0 else 0))
return result | def column_start_to_end(data, column, start_idx, end_idx) | Return a list of numeric data entries in the given column from the starting
index to the ending index. This can list can be compiled over one or more
DataFrames.
:param data: a list of DataFrames to extract data in one column from
:type data: Pandas.DataFrame list
:param column: a column index
:type column: int
:param start_idx: the index of the starting row
:type start_idx: int
:param start_idx: the index of the ending row
:type start_idx: int
:return: a list of data from the given column
:rtype: float list | 2.299042 | 2.267283 | 1.014008 |
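The interesting part of `column_start_to_end` is the day offset: each ProCoDA datalog restarts its day-fraction clock at midnight, so the i-th file's time column gets `i` added to it and the stitched series keeps increasing. A toy illustration of that offset logic (not a call into the parser itself):

```python
import pandas as pd

# two fake one-day "datalogs"; column 0 is a day fraction that resets each file
day1 = pd.DataFrame({"Day fraction": [0.95, 0.97, 0.99], "Sensor": [1.0, 1.1, 1.2]})
day2 = pd.DataFrame({"Day fraction": [0.01, 0.03, 0.05], "Sensor": [1.3, 1.4, 1.5]})

stitched_time = list(day1.iloc[:, 0]) + [t + 1 for t in day2.iloc[:, 0]]   # +1 day offset
stitched_sensor = list(day1.iloc[:, 1]) + list(day2.iloc[:, 1])
print(stitched_time)     # [0.95, 0.97, 0.99, 1.01, 1.03, 1.05] -- monotonic
print(stitched_sensor)
```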
data_agg = []
day = 0
first_day = True
overnight = False
extension = ".xls"
if path[-1] != '/':
path += '/'
if not isinstance(dates, list):
dates = [dates]
for d in dates:
state_file = path + "statelog " + d + extension
data_file = path + "datalog " + d + extension
states = pd.read_csv(state_file, delimiter='\t')
data = pd.read_csv(data_file, delimiter='\t')
states = np.array(states)
data = np.array(data)
# get the start and end times for the state
state_start_idx = states[:, 1] == state
state_start = states[state_start_idx, 0]
state_end_idx = np.append([False], state_start_idx[0:-1])
state_end = states[state_end_idx, 0]
if overnight:
state_start = np.insert(state_start, 0, 0)
state_end = np.insert(state_end, 0, states[0, 0])
if state_start_idx[-1]:
state_end = np.append(state_end, data[-1, 0])
# get the corresponding indices in the data array
data_start = []
data_end = []
for i in range(np.size(state_start)):
add_start = True
for j in range(np.size(data[:, 0])):
if (data[j, 0] > state_start[i]) and add_start:
data_start.append(j)
add_start = False
if data[j, 0] > state_end[i]:
data_end.append(j-1)
break
if first_day:
start_time = data[0, 0]
# extract data at those times
for i in range(np.size(data_start)):
t = data[data_start[i]:data_end[i], 0] + day - start_time
if isinstance(column, int):
c = data[data_start[i]:data_end[i], column]
else:
c = data[column][data_start[i]:data_end[i]]
if overnight and i == 0:
data_agg = np.insert(data_agg[-1], np.size(data_agg[-1][:, 0]),
np.vstack((t, c)).T)
else:
data_agg.append(np.vstack((t, c)).T)
day += 1
if first_day:
first_day = False
if state_start_idx[-1]:
overnight = True
return data_agg | def get_data_by_state(path, dates, state, column) | Reads a ProCoDA file and extracts the time and data column for each
iteration ofthe given state.
Note: column 0 is time, the first data column is column 1.
:param path: The path to the folder containing the ProCoDA data file(s), defaults to the current directory
:type path: string
:param dates: A single date or list of dates for which data was recorded, formatted "M-D-YYYY"
:type dates: string or string list
:param state: The state ID number for which data should be plotted
:type state: int
:param column: The integer index of the column that you want to extract OR the header of the column that you want to extract
:type column: int or string
:return: A list of lists of the time and data columns extracted for each iteration of the state. For example, if "data" is the output, data[i][:,0] gives the time column and data[i][:,1] gives the data column for the ith iteration of the given state and column. data[i][0] would give the first [time, data] pair.
:type: list of lists of lists
:Examples:
.. code-block:: python
data = get_data_by_state(path='/Users/.../ProCoDA Data/', dates=["6-19-2013", "6-20-2013"], state=1, column=28) | 2.27844 | 2.269947 | 1.003741 |
df = pd.read_csv(path, delimiter='\t')
start_time = pd.to_numeric(df.iloc[start, 0])*u.day
day_times = pd.to_numeric(df.iloc[start:end, 0])
time_data = np.subtract((np.array(day_times)*u.day), start_time)
return time_data | def column_of_time(path, start, end=-1) | This function extracts the column of times from a ProCoDA data file.
:param path: The file path of the ProCoDA data file. If the file is in the working directory, then the file name is sufficient.
:type path: string
:param start: Index of first row of data to extract from the data file
:type start: int
:param end: Index of last row of data to extract from the data. Defaults to last row
:type end: int
:return: Experimental times starting at 0 day with units of days.
:rtype: numpy.array
:Examples:
.. code-block:: python
time = column_of_time("Reactor_data.txt", 0) | 3.550128 | 4.116375 | 0.86244 |
if not isinstance(start, int):
start = int(start)
if not isinstance(end, int):
end = int(end)
df = pd.read_csv(path, delimiter='\t')
if units == "":
if isinstance(column, int):
data = np.array(pd.to_numeric(df.iloc[start:end, column]))
else:
data = df[column][0:len(df)]
else:
if isinstance(column, int):
data = np.array(pd.to_numeric(df.iloc[start:end, column]))*u(units)
else:
data = df[column][0:len(df)]*u(units)
return data | def column_of_data(path, start, column, end="-1", units="") | This function extracts a column of data from a ProCoDA data file.
Note: Column 0 is time. The first data column is column 1.
:param path: The file path of the ProCoDA data file. If the file is in the working directory, then the file name is sufficient.
:type path: string
:param start: Index of first row of data to extract from the data file
:type start: int
:param end: Index of last row of data to extract from the data. Defaults to last row
:type end: int, optional
:param column: Index of the column that you want to extract OR name of the column header that you want to extract
:type column: int or string
:param units: The units you want to apply to the data, e.g. 'mg/L'. Defaults to "" (dimensionless)
:type units: string, optional
:return: Experimental data with the units applied.
:rtype: numpy.array
:Examples:
.. code-block:: python
data = column_of_data("Reactor_data.txt", 0, 1, -1, "mg/L") | 2.088693 | 2.356549 | 0.886336 |
df = pd.read_csv(path, delimiter='\t')
text_row = df.iloc[0:-1, 0].str.contains('[a-zA-Z]')
text_row_index = text_row.index[text_row].tolist()
notes = df.loc[text_row_index]
return notes | def notes(path) | This function extracts any experimental notes from a ProCoDA data file.
:param path: The file path of the ProCoDA data file. If the file is in the working directory, then the file name is sufficient.
:type path: string
:return: The rows of the data file that contain text notes inserted during the experiment. Use this to identify the section of the data file that you want to extract.
:rtype: pandas.Dataframe | 4.258571 | 4.412819 | 0.965045 |
outputs = []
metafile = pd.read_csv(path, delimiter='\t', header=None)
metafile = np.array(metafile)
ids = metafile[1:, 0]
if not isinstance(ids[0], str):
ids = list(map(str, ids))
if metaids:
paths = []
for i in range(len(ids)):
if ids[i] in metaids:
paths.append(metafile[i+1, 4])
else:
paths = metafile[1:, 4]
basepath = os.path.join(os.path.split(path)[0], metafile[0, 4])
# use a loop to evaluate each experiment in the metafile
for i in range(len(paths)):
# get the range of dates for experiment i
day1 = metafile[i+1, 1]
# modify the metafile date so that it works with datetime format
if not (day1[2] == "-" or day1[2] == "/"):
day1 = "0" + day1
if not (day1[5] == "-" or day1[5] == "/"):
day1 = day1[:3] + "0" + day1[3:]
if day1[2] == "-":
dt = datetime.strptime(day1, "%m-%d-%Y")
else:
dt = datetime.strptime(day1, "%m/%d/%y")
duration = metafile[i+1, 3]
if not isinstance(duration, int):
duration = int(duration)
date_list = []
for j in range(duration):
curr_day = dt.strftime("%m-%d-%Y")
if curr_day[3] == "0":
curr_day = curr_day[:3] + curr_day[4:]
if curr_day[0] == "0":
curr_day = curr_day[1:]
date_list.append(curr_day)
dt = dt + timedelta(days=1)
path = str(Path(os.path.join(basepath, paths[i]))) + os.sep
_, data = read_state(date_list, state, column, units, path, extension)
outputs.append(func(data))
return ids, outputs | def read_state_with_metafile(func, state, column, path, metaids=[],
extension=".xls", units="") | Takes in a ProCoDA meta file and performs a function for all data of a
certain state in each of the experiments (denoted by file paths in then
metafile)
Note: Column 0 is time. The first data column is column 1.
:param func: A function that will be applied to data from each instance of the state
:type func: function
:param state: The state ID number for which data should be extracted
:type state: int
:param column: Index of the column that you want to extract OR header of the column that you want to extract
:type column: int or string
:param path: The file path of the ProCoDA data file (must be tab-delimited)
:type path: string
:param metaids: a list of the experiment IDs you'd like to analyze from the metafile
:type metaids: string list, optional
:param extension: The file extension of the tab delimited file. Defaults to ".xls" if no argument is passed in
:type extension: string, optional
:param units: The units you want to apply to the data, e.g. 'mg/L'. Defaults to "" (dimensionless)
:type units: string, optional
:return: ids (string list) - The list of experiment ids given in the metafile
:return: outputs (list) - The outputs of the given function for each experiment
:Examples:
.. code-block:: python
def avg_with_units(lst):
num = np.size(lst)
acc = 0
for i in lst:
acc = i + acc
return acc / num
path = "../tests/data/Test Meta File.txt"
ids, answer = read_state_with_metafile(avg_with_units, 1, 28, path, [], ".xls", "mg/L") | 2.642918 | 2.553192 | 1.035142 |
if not isinstance(funcs, list):
funcs = [funcs] * len(headers)
if not isinstance(states, list):
states = [states] * len(headers)
if not isinstance(columns, list):
columns = [columns] * len(headers)
data_agg = []
for i in range(len(headers)):
ids, data = read_state_with_metafile(funcs[i], states[i], columns[i],
path, metaids, extension)
data_agg = np.append(data_agg, [data])
output = pd.DataFrame(data=np.vstack((ids, data_agg)).T,
columns=["ID"]+headers)
output.to_csv(out_name, sep='\t')
return output | def write_calculations_to_csv(funcs, states, columns, path, headers, out_name,
metaids=[], extension=".xls") | Writes each output of the given functions on the given states and data
columns to a new column in the specified output file.
Note: Column 0 is time. The first data column is column 1.
:param funcs: A function or list of functions which will be applied in order to the data. If only one function is given it is applied to all the states/columns
:type funcs: function or function list
:param states: The state ID numbers for which data should be extracted. List should be in order of calculation or if only one state is given then it will be used for all the calculations
:type states: string or string list
:param columns: The index of a column, the header of a column, a list of indexes, OR a list of headers of the column(s) that you want to apply calculations to
:type columns: int, string, int list, or string list
:param path: Path to your ProCoDA metafile (must be tab-delimited)
:type path: string
:param headers: List of the desired header for each calculation, in order
:type headers: string list
:param out_name: Desired name for the output file. Can include a relative path
:type out_name: string
:param metaids: A list of the experiment IDs you'd like to analyze from the metafile
:type metaids: string list, optional
:param extension: The file extension of the tab delimited file. Defaults to ".xls" if no argument is passed in
:type extension: string, optional
:requires: funcs, states, columns, and headers are all of the same length if they are lists. Some being lists and some single values are okay.
:return: out_name.csv (CVS file) - A CSV file with the each column being a new calcuation and each row being a new experiment on which the calcuations were performed
:return: output (Pandas.DataFrame)- Pandas DataFrame holding the same data that was written to the output file | 2.624745 | 2.66782 | 0.983854 |
B_plate = sed_inputs['plate_settlers']['S'] + sed_inputs['plate_settlers']['thickness']
return math.floor((sed_inputs['plate_settlers']['L_cantilevered'].magnitude / B_plate.magnitude
* np.tan(sed_inputs['plate_settlers']['angle'].to(u.rad).magnitude)) + 1) | def n_sed_plates_max(sed_inputs=sed_dict) | Return the maximum possible number of plate settlers in a module given
plate spacing, thickness, angle, and unsupported length of plate settler.
Parameters
----------
S_plate : float
Edge to edge distance between plate settlers
thickness_plate : float
Thickness of PVC sheet used to make plate settlers
L_sed_plate_cantilevered : float
Maximum length of sed plate sticking out past module pipes without any
additional support. The goal is to prevent floppy modules that don't
maintain constant distances between the plates
angle_plate : float
Angle of plate settlers
Returns
-------
int
Maximum number of plates
Examples
--------
>>> from aide_design.play import*
>>> | 6.972538 | 4.313725 | 1.616361 |
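`n_sed_plates_max` is pure geometry: with plate spacing B = S + thickness, the cantilevered length grows by B/tan(angle) per plate, so the count is capped at floor(L_cantilevered * tan(angle) / B + 1). A standalone sketch with hypothetical dimensions:

```python
import numpy as np

def n_sed_plates_max_simple(s_plate, thickness, l_cantilevered, angle_deg):
    """Max plates per module before the unsupported plate length is exceeded."""
    b_plate = s_plate + thickness            # center-to-center plate spacing
    return int(np.floor(l_cantilevered / b_plate * np.tan(np.radians(angle_deg)) + 1))

# 2.5 cm spacing, 2 mm PVC sheet, 20 cm allowed overhang, 60 degree plates (illustrative)
print(n_sed_plates_max_simple(0.025, 0.002, 0.20, 60))   # 13
```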
return ((sed_inputs['tank']['vel_up'].to(u.inch/u.s).magnitude /
sed_inputs['manifold']['diffuser']['vel_max'].to(u.inch/u.s).magnitude)
* sed_inputs['tank']['W']) | def w_diffuser_inner_min(sed_inputs=sed_dict) | Return the minimum inner width of each diffuser in the sedimentation tank.
Parameters
----------
sed_inputs : dict
A dictionary of all of the constant inputs needed for sedimentation tank
calculations. Can be found in sed.yaml
Returns
-------
float
Minimum inner width of each diffuser in the sedimentation tank
Examples
--------
>>> from aide_design.play import*
>>> | 7.348462 | 8.796022 | 0.83543 |
return ut.ceil_nearest(w_diffuser_inner_min(sed_inputs).magnitude,
(np.arange(1/16,1/4,1/16)*u.inch).magnitude) | def w_diffuser_inner(sed_inputs=sed_dict) | Return the inner width of each diffuser in the sedimentation tank.
Parameters
----------
sed_inputs : dict
A dictionary of all of the constant inputs needed for sedimentation tank
calculations can be found in sed.yaml
Returns
-------
float
Inner width of each diffuser in the sedimentation tank
Examples
--------
>>> from aide_design.play import*
>>> | 11.023753 | 12.79726 | 0.861415 |
return (w_diffuser_inner_min(sed_inputs) +
(2 * sed_inputs['manifold']['diffuser']['thickness_wall'])).to(u.m).magnitude | def w_diffuser_outer(sed_inputs=sed_dict) | Return the outer width of each diffuser in the sedimentation tank.
Parameters
----------
sed_inputs : dict
A dictionary of all of the constant inputs needed for sedimentation tank
calculations can be found in sed.yaml
Returns
-------
float
Outer width of each diffuser in the sedimentation tank
Examples
--------
>>> from aide_design.play import*
>>> | 13.32072 | 16.051031 | 0.829898 |
return ((sed_inputs['manifold']['diffuser']['A'] /
(2 * sed_inputs['manifold']['diffuser']['thickness_wall']))
- w_diffuser_inner(sed_inputs).to(u.inch)).to(u.m).magnitude | def L_diffuser_outer(sed_inputs=sed_dict) | Return the outer length of each diffuser in the sedimentation tank.
Parameters
----------
sed_inputs : dict
A dictionary of all of the constant inputs needed for sedimentation tank
calculations can be found in sed.yaml
Returns
-------
float
Outer length of each diffuser in the sedimentation tank
Examples
--------
>>> from aide_design.play import*
>>> | 10.777956 | 12.161643 | 0.886225 |
return (L_diffuser_outer(sed_inputs)
- (2 * (sed_inputs['manifold']['diffuser']['thickness_wall']).to(u.m)).magnitude) | def L_diffuser_inner(sed_inputs=sed_dict) | Return the inner length of each diffuser in the sedimentation tank.
Parameters
----------
sed_inputs : dict
A dictionary of all of the constant inputs needed for sedimentation tank
calculations can be found in sed.yaml
Returns
-------
float
Inner length of each diffuser in the sedimentation tank
Examples
--------
>>> from aide_design.play import*
>>> | 16.582094 | 18.421324 | 0.900158 |
return (sed_inputs['tank']['vel_up'].to(u.m/u.s) *
sed_inputs['tank']['W'].to(u.m) *
L_diffuser_outer(sed_inputs)).magnitude | def q_diffuser(sed_inputs=sed_dict) | Return the flow through each diffuser.
Parameters
----------
sed_inputs : dict
A dictionary of all of the constant inputs needed for sedimentation tank
calculations can be found in sed.yaml
Returns
-------
float
Flow through each diffuser in the sedimentation tank
Examples
--------
>>> from aide_design.play import*
>>> | 8.997475 | 10.19538 | 0.882505 |
return (q_diffuser(sed_inputs)
        / (w_diffuser_inner(sed_inputs) * L_diffuser_inner(sed_inputs))) | def vel_sed_diffuser(sed_inputs=sed_dict) | Return the velocity through each diffuser.
Parameters
----------
sed_inputs : dict
A dictionary of all of the constant inputs needed for sedimentation tank
calculations can be found in sed.yaml
Returns
-------
float
Velocity through each diffuser in the sedimentation tank
Examples
--------
>>> from aide_design.play import*
>>> | 12.547677 | 15.622301 | 0.80319 |
return (sed_inputs['tank']['L'] * sed_inputs['tank']['vel_up'].to(u.m/u.s) *
sed_inputs['tank']['W'].to(u.m)).magnitude | def q_tank(sed_inputs=sed_dict) | Return the maximum flow through one sedimentation tank.
Parameters
----------
sed_inputs : dict
A dictionary of all of the constant inputs needed for sedimentation tank
calculations can be found in sed.yaml
Returns
-------
float
Maximum flow through one sedimentation tank
Examples
--------
>>> from aide_design.play import*
>>> | 5.612858 | 7.455789 | 0.752819 |
vel_manifold_max = (sed_inputs['manifold']['diffuser']['vel_max'].to(u.m/u.s).magnitude *
                    np.sqrt(2 * (1 - sed_inputs['manifold']['ratio_Q_man_orifice']**2) /
                            (sed_inputs['manifold']['ratio_Q_man_orifice']**2 + 1)))
return vel_manifold_max | def vel_inlet_man_max(sed_inputs=sed_dict) | Return the maximum velocity through the manifold.
Parameters
----------
sed_inputs : dict
A dictionary of all of the constant inputs needed for sedimentation tank
calculations can be found in sed.yaml
Returns
-------
float
Maximum velocity through the manifold.
Examples
--------
>>> from aide_design.play import*
>>> | 5.784726 | 6.812671 | 0.849113 |
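A worked number for the manifold velocity bound above, where the ratio is the minimum-to-maximum orifice flow ratio along the manifold (both values assumed for illustration):

import numpy as np

vel_diffuser_max = 0.443  # m/s, assumed maximum diffuser velocity
ratio_Q = 0.8             # assumed orifice flow uniformity ratio
vel_manifold_max = vel_diffuser_max * np.sqrt(2 * (1 - ratio_Q**2) / (ratio_Q**2 + 1))
print(round(vel_manifold_max, 3))  # about 0.294 m/s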
q = q_tank(sed_inputs).magnitude
return (int(np.ceil(Q_plant / q))) | def n_tanks(Q_plant, sed_inputs=sed_dict) | Return the number of sedimentation tanks required for a given flow rate.
Parameters
----------
Q_plant : float
Total plant flow rate
sed_inputs : dict
A dictionary of all of the constant inputs needed for sedimentation tank
calculations can be found in sed.yaml
Returns
-------
int
Number of sedimentation tanks required for a given flow rate.
Examples
--------
>>> from aide_design.play import*
>>> | 9.222693 | 15.561614 | 0.592657 |
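For example, with assumed numbers, a 60 L/s plant whose tanks each take at most 18 L/s needs:

import numpy as np
print(int(np.ceil(60 / 18)))  # 4 tanks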
n = n_tanks(Q_plant, sed_inputs)  # local name must differ from the function to avoid UnboundLocalError
return ((n * sed_inputs['tank']['W']) + sed_inputs['thickness_wall'] +
        ((n - 1) * sed_inputs['thickness_wall'])) | def L_channel(Q_plant, sed_inputs=sed_dict) | Return the length of the inlet and exit channels for the sedimentation tank.
Parameters
----------
Q_plant : float
Total plant flow rate
sed_inputs : dict
A dictionary of all of the constant inputs needed for sedimentation tank
calculations can be found in sed.yaml
Returns
-------
float
Length of the inlet and exit channels for the sedimentation tank.
Examples
--------
>>> from aide_design.play import*
>>> | 5.972497 | 5.967499 | 1.000838 |
#Inputs do not need to be checked here because they are checked by
#functions this function calls.
nu = pc.viscosity_kinematic(temp)  # the pipe flow functions expect kinematic viscosity
hl = sed_inputs['manifold']['exit_man']['hl_orifice'].to(u.m)
L = sed_inputs['tank']['L']
N_orifices = sed_inputs['manifold']['exit_man']['N_orifices']
K_minor = con.K_MINOR_PIPE_EXIT
pipe_rough = mat.PIPE_ROUGH_PVC.to(u.m)
D = max(diam_pipemajor(Q_plant, hl, L, nu, pipe_rough).magnitude,
diam_pipeminor(Q_plant, hl, K_minor).magnitude)
err = 1.00
while err > 0.01:
D_prev = D
f = pc.fric(Q_plant, D_prev, nu, pipe_rough)
D = ((8 * Q_plant**2 / (pc.GRAVITY.magnitude * np.pi**2 * hl)) *
     (((f * L / D_prev + K_minor) *
       (1/3 + 1/(2 * N_orifices) + 1/(6 * N_orifices**2)))
      / (1 - sed_inputs['manifold']['ratio_Q_orifice']**2)))**0.25
err = abs(D_prev - D) / ((D + D_prev) / 2)
return D | def ID_exit_man(Q_plant, temp, sed_inputs=sed_dict) | Return the inner diameter of the exit manifold by guessing an initial
diameter then iterating through pipe flow calculations until the answer
converges within 1% error.
Parameters
----------
Q_plant : float
Total plant flow rate
temp : float
Design temperature
sed_inputs : dict
A dictionary of all of the constant inputs needed for sedimentation tank
calculations can be found in sed.yaml
Returns
-------
float
Inner diameter of the exit manifold
Examples
--------
>>> from aide_design.play import*
>>> | 7.726044 | 7.686468 | 1.005149 |
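The 1% convergence pattern above can be illustrated with a self-contained sketch that uses the Darcy-Weisbach relation and the Swamee-Jain friction factor in place of the aide_design helpers; every number and the simplified manifold relation are assumptions for illustration only:

import numpy as np

g = 9.81         # m/s^2
Q = 0.06         # m^3/s, assumed plant flow
hl = 0.05        # m, assumed available headloss
L = 5.8          # m, assumed manifold length
nu = 1e-6        # m^2/s, kinematic viscosity of water
rough = 1.2e-5   # m, assumed PVC roughness
K_minor = 1.0    # assumed minor loss coefficient

def fric(Q, D):
    # Swamee-Jain friction factor for turbulent pipe flow.
    Re = 4 * Q / (np.pi * D * nu)
    return 0.25 / np.log10(rough / (3.7 * D) + 5.74 / Re**0.9) ** 2

D = 0.10         # m, initial guess
err = 1.0
while err > 0.01:
    D_prev = D
    f = fric(Q, D_prev)
    # Diameter that gives the target headloss with major plus minor losses.
    D = (8 * Q**2 * (f * L / D_prev + K_minor) / (g * np.pi**2 * hl)) ** 0.25
    err = abs(D - D_prev) / ((D + D_prev) / 2)
print(round(D, 3))  # converges near 0.30 m for these assumed inputs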
Q_orifice = Q_plant / sed_inputs['manifold']['exit_man']['N_orifices']
D_orifice = np.sqrt(4 * Q_orifice / (np.pi * con.RATIO_VC_ORIFICE *
                                     np.sqrt(2 * pc.GRAVITY.magnitude *
                                             sed_inputs['manifold']['exit_man']['hl_orifice'].magnitude)))
return ut.ceil_nearest(D_orifice, drill_bits) | def D_exit_man_orifice(Q_plant, drill_bits, sed_inputs=sed_dict) | Return the diameter of the orifices in the exit manifold for the sedimentation tank.
Parameters
----------
Q_plant : float
Total plant flow rate
drill_bits : list
List of possible drill bit sizes
sed_inputs : dict
A dictionary of all of the constant inputs needed for sedimentation tank
calculations can be found in sed.yaml
Returns
-------
float
Diameter of the orifices in the exit manifold for the sedimentation tank.
Examples
--------
>>> from aide_design.play import*
>>> | 8.415298 | 9.095009 | 0.925265 |
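A worked number for the orifice diameter before rounding up to a drill bit size (flow, orifice count, headloss, and vena contracta ratio all assumed for illustration):

import numpy as np

g = 9.81
Q_orifice = 0.060 / 58   # m^3/s per orifice: 60 L/s split over 58 orifices
hl_orifice = 0.05        # m of headloss across each orifice
ratio_vc = 0.63          # assumed vena contracta coefficient
D = np.sqrt(4 * Q_orifice / (np.pi * ratio_vc * np.sqrt(2 * g * hl_orifice)))
print(round(D * 1000, 1))  # about 45.9 mm, then rounded up to the nearest drill bit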
L_sed_plate = ((sed_inputs['plate_settlers']['S'] * ((sed_inputs['tank']['vel_up'] / sed_inputs['plate_settlers']['vel_capture']) - 1)
                + sed_inputs['plate_settlers']['thickness'] * (sed_inputs['tank']['vel_up'] / sed_inputs['plate_settlers']['vel_capture']))
               / (np.sin(sed_inputs['plate_settlers']['angle']) * np.cos(sed_inputs['plate_settlers']['angle']))
               ).to(u.m)
return L_sed_plate | def L_sed_plate(sed_inputs=sed_dict) | Return the length of a single plate in the plate settler module based on
achieving the desired capture velocity
Parameters
----------
sed_inputs : dict
A dictionary of all of the constant inputs needed for sedimentation tank
calculations can be found in sed.yaml
Returns
-------
float
Length of a single plate
Examples
--------
>>> from aide_design.play import*
>>> | 4.016018 | 3.91708 | 1.025258 |
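A worked number for the plate length formula (spacing, thickness, velocities, and angle assumed for illustration):

import numpy as np

S = 0.025            # m, plate spacing
t = 0.002            # m, plate thickness
vel_up = 1.0         # mm/s, tank upflow velocity
vel_capture = 0.12   # mm/s, capture velocity
angle = np.deg2rad(60)
ratio = vel_up / vel_capture
L = (S * (ratio - 1) + t * ratio) / (np.sin(angle) * np.cos(angle))
print(round(L, 2))  # about 0.46 m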
index = (np.abs(np.array(pipedb['NDinch']) - (ND))).argmin()
return pipedb.iloc[index, 1] | def OD(ND) | Return a pipe's outer diameter according to its nominal diameter.
The pipe schedule is not required here because all of the pipes of a
given nominal diameter have the same outer diameter.
Steps:
1. Find the index of the closest nominal diameter.
(Should this be changed to find the next largest ND?)
2. Take the values of the array, subtract the ND, take the absolute
value, find the index of the minimum value. | 9.855004 | 7.950272 | 1.239581 |
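The nearest-nominal-diameter lookup can be sketched with a toy table of a few common sizes standing in for the pipe database:

import numpy as np

ND_inch = np.array([0.5, 1.0, 1.5, 2.0, 3.0])            # nominal diameters
OD_inch = np.array([0.840, 1.315, 1.900, 2.375, 3.500])  # matching outer diameters
ND = 1.2
index = np.abs(ND_inch - ND).argmin()
print(OD_inch[index])  # 1.315, the OD of the closest nominal size (1 inch)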
myindex = (np.abs(np.array(pipedb['NDinch']) - (ND))).argmin()
return (pipedb.iloc[myindex, 1] - 2*(pipedb.iloc[myindex, 5])) | def ID_sch40(ND) | Return the inner diameter for schedule 40 pipes.
The wall thickness for these pipes is in the pipedb.
Take the values of the array, subtract the ND, take the absolute
value, find the index of the minimum value. | 7.713631 | 4.53336 | 1.701526 |
ND_all_available = []
for i in range(len(pipedb['NDinch'])):
if pipedb.iloc[i, 4] == 1:
ND_all_available.append((pipedb['NDinch'][i]))
return ND_all_available * u.inch | def ND_all_available() | Return an array of available nominal diameters.
NDs available are those commonly used, based on the 'Used' column
in the pipedb. | 5.681035 | 3.860897 | 1.471429 |
ID = []
ND = ND_all_available()
for i in range(len(ND)):
ID.append(ID_SDR(ND[i], SDR).magnitude)
return ID * u.inch | def ID_SDR_all_available(SDR) | Return an array of inner diameters with a given SDR.
IDs available are those commonly used based on the 'Used' column
in the pipedb. | 6.209229 | 6.361308 | 0.976093 |
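ID_SDR itself is not shown in this excerpt; a plausible stand-in follows from the definition of the standard dimension ratio (outer diameter divided by wall thickness), giving ID = OD * (1 - 2 / SDR):

def id_sdr(od, sdr):
    # Assumed stand-in for ID_SDR, not the library implementation.
    return od * (1 - 2 / sdr)

print(round(id_sdr(1.315, 26), 3))  # about 1.214 inch for a 1 inch SDR-26 pipe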
ID_available = np.array(ID_SDR_all_available(SDR))
for i in range(len(ID_available)):
    if ID_available[i] >= (ID.to(u.inch)).magnitude:
        return ND_all_available()[i] | def ND_SDR_available(ID, SDR) | Return an available ND given an ID and a schedule.
Takes the values of the array, compares to the ID, and finds the index
of the first value greater or equal. | 5.789641 | 5.249004 | 1.102998 |
# Ensure all the arguments except total headloss are the same length
#TODO
# Total number of pipe lengths
n = diameters.size
# Start with a flow rate guess based on the flow through a single pipe section
flow = pc.flow_pipe(diameters[0], target_headloss, lengths[0], nu, pipe_rough, k_minors[0])
err = 1.0
# Add all the pipe length headlosses together to test the error
while abs(err) > 0.01 :
headloss = sum([pc.headloss(flow, diameters[i], lengths[i], nu, pipe_rough,
k_minors[i]).to(u.m).magnitude for i in range(n)])
# Test the error. This is always less than one.
err = (target_headloss - headloss) / (target_headloss + headloss)
# Adjust the total flow in the direction of the error. If there is more headloss than target headloss,
# The flow should be reduced, and vice-versa.
flow = flow + err * flow
return flow | def flow_pipeline(diameters, lengths, k_minors, target_headloss,
nu=con.WATER_NU, pipe_rough=mats.PVC_PIPE_ROUGH) | This function takes a single pipeline with multiple sections, each potentially with different diameters,
lengths and minor loss coefficients and determines the flow rate for a given headloss.
:param diameters: list of diameters, where the i_th diameter corresponds to the i_th pipe section
:type diameters: numpy.ndarray
:param lengths: list of lengths, where the i_th length corresponds to the i_th pipe section
:type lengths: numpy.ndarray
:param k_minors: list of minor loss coefficients, where the i_th coefficient corresponds to the i_th pipe section
:type k_minors: numpy.ndarray
:param target_headloss: a single headloss describing the total headloss through the system
:type target_headloss: float
:param nu: The fluid dynamic viscosity of the fluid. Defaults to water at room temperature (1 * 10**-6 * m**2/s)
:type nu: float
:param pipe_rough: The pipe roughness. Defaults to PVC roughness.
:type pipe_rough: float
:return: the total flow through the system
:rtype: float | 5.919502 | 5.668377 | 1.044303 |
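The error-driven search above can be illustrated with a toy headloss model hl = c * Q**2 in place of the full pipe-flow calculation (c, the target headloss, and the initial guess are assumptions):

c = 5.0e4              # assumed lumped loss coefficient, hl in m when Q is in m^3/s
target_headloss = 0.5  # m

flow = 0.001           # m^3/s, initial guess
err = 1.0
while abs(err) > 0.01:
    headloss = c * flow**2
    err = (target_headloss - headloss) / (target_headloss + headloss)
    flow = flow + err * flow
print(round(flow, 5))  # converges near (0.5 / 5e4)**0.5 = 0.00316 m^3/s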
w_per_flow = 2 / ((2 * pc.gravity * z) ** (1 / 2) *
con.VC_ORIFICE_RATIO * np.pi * self.hl)
return w_per_flow.to_base_units() | def stout_w_per_flow(self, z) | Return the width per unit flow of a Stout weir at elevation z. More info
here. <https://confluence.cornell.edu/display/AGUACLARA/
LFOM+sutro+weir+research> | 12.372258 | 10.47699 | 1.180898 |
N_estimated = (self.hl * np.pi / (2 * self.stout_w_per_flow(self.hl) * self.q)).to(u.dimensionless)
variablerow = min(10, max(4, math.trunc(N_estimated.magnitude)))
return variablerow | def n_rows(self) | This equation states that the open area corresponding to one row
can be set equal to two orifices of diameter=row height. If there
are more than two orifices per row at the top of the LFOM then there
are more orifices than are convenient to drill and more than
necessary for good accuracy. Thus this relationship can be used to
increase the spacing between the rows and thus increase the diameter
of the orifices. This spacing function also sets the lower depth on
the high flow rate LFOM with no accurate flows below a depth equal
to the first row height.
But it might be better to always set the number of rows to 10.
The challenge is to figure out a reasonable system of constraints that
reliably returns a valid solution. | 15.357656 | 13.576065 | 1.13123 |
return (4 / (3 * math.pi) * (2 * pc.gravity * self.hl) ** (1 / 2)).to(u.m/u.s) | def vel_critical(self) | The average vertical velocity of the water inside the LFOM pipe
at the very bottom of the bottom row of orifices. The speed of
falling water is 0.841 m/s for all linear flow orifice meters of
height 20 cm, independent of total plant flow rate. | 10.712917 | 10.791332 | 0.992734 |
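The 0.841 m/s figure can be checked directly from the expression above with a 20 cm headloss:

import numpy as np
print(round(4 / (3 * np.pi) * np.sqrt(2 * 9.81 * 0.20), 3))  # 0.841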
return (self.safety_factor * self.q / self.vel_critical).to(u.cm**2) | def area_pipe_min(self) | The minimum cross-sectional area of the LFOM pipe that assures
a safety factor. | 16.799139 | 12.799801 | 1.312453 |
ID = pc.diam_circle(self.area_pipe_min)
return pipe.ND_SDR_available(ID, self.sdr) | def nom_diam_pipe(self) | The nominal diameter of the LFOM pipe | 35.347866 | 39.936367 | 0.885105 |
# Calculate the center of the top row:
z = self.hl - 0.5 * self.b_rows
# Multiply the stout weir width per unit flow by the plant flow and the height of one row.
return self.stout_w_per_flow(z) * self.q * self.b_rows | def area_top_orifice(self) | Estimate the orifice area corresponding to the top row of orifices.
Another solution method is to use integration to solve this problem.
Here we use the width of the stout weir in the center of the top row
to estimate the area of the top orifice | 16.598978 | 9.87356 | 1.681154 |
maxdrill = min(self.b_rows, self.d_orifice_max)
return ut.floor_nearest(maxdrill, self.drill_bits) | def orifice_diameter(self) | The actual orifice diameter. We don't let the diameter extend
beyond its row space. | 19.903542 | 17.115694 | 1.162883 |
c = math.pi * pipe.ID_SDR(self.nom_diam_pipe, self.sdr)
b = self.orifice_diameter + self.s_orifice
return math.floor(c/b) | def n_orifices_per_row_max(self) | A bound on the number of orifices allowed in each row.
The distance between consecutive orifices must be enough to retain
structural integrity of the pipe. | 17.070496 | 13.792967 | 1.237623 |
return np.linspace(1 / self.n_rows, 1, self.n_rows)*self.q | def flow_ramp(self) | An equally spaced array representing flow at each row. | 9.374066 | 5.279451 | 1.775576 |
return (np.linspace(0, self.n_rows-1, self.n_rows))*self.b_rows + 0.5 * self.orifice_diameter | def height_orifices(self) | Calculates the height of the center of each row of orifices.
The bottom of the bottom row orifices is at the zero elevation
point of the LFOM so that the flow goes to zero when the water height
is at zero. | 9.46137 | 6.342576 | 1.491724 |
flow = 0
for i in range(Row_Index_Submerged + 1):
flow = flow + (N_LFOM_Orifices[i] * (
pc.flow_orifice_vert(self.orifice_diameter,
self.b_rows*(Row_Index_Submerged + 1)
- self.height_orifices[i],
con.VC_ORIFICE_RATIO)))
return flow | def flow_actual(self, Row_Index_Submerged, N_LFOM_Orifices) | Calculates the flow for a given number of submerged rows of orifices
harray is the distance from the water level to the center of the
orifices when the water is at the max level.
Parameters
----------
Row_Index_Submerged: int
The index of the submerged row. All rows below and including this
index are submerged.
N_LFOM_Orifices: [int]
The number of orifices at each row.
Returns
--------
The flow through all of the orifices that are submerged. | 6.37429 | 6.160331 | 1.034732 |
# H is distance from the bottom of the next row of orifices to the
# center of the current row of orifices
H = self.b_rows - 0.5*self.orifice_diameter
flow_per_orifice = pc.flow_orifice_vert(self.orifice_diameter, H, con.VC_ORIFICE_RATIO)
n = np.zeros(self.n_rows)
for i in range(self.n_rows):
# calculate the ideal number of orifices at the current row without
# constraining to an integer
flow_needed = self.flow_ramp[i] - self.flow_actual(i, n)
n_orifices_real = (flow_needed / flow_per_orifice).to(u.dimensionless)
# constrain number of orifices to be less than the max per row and
# greater or equal to 0
n[i] = min((max(0, round(n_orifices_real))), self.n_orifices_per_row_max)
return n | def n_orifices_per_row(self) | Calculate number of orifices at each level given an orifice
diameter. | 5.078629 | 4.772974 | 1.064039 |
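A stand-alone sketch of the greedy row-filling idea above, using a constant flow per orifice instead of the depth-dependent orifice equation (all numbers are illustrative assumptions):

import numpy as np

n_rows = 6
q_max = 0.010              # m^3/s through the LFOM at full depth
flow_ramp = np.linspace(q_max / n_rows, q_max, n_rows)
flow_per_orifice = 0.0004  # m^3/s per submerged orifice
max_per_row = 20

n = []
for i in range(n_rows):
    flow_so_far = flow_per_orifice * sum(n)  # crude: ignores the depth dependence
    needed = (flow_ramp[i] - flow_so_far) / flow_per_orifice
    n.append(int(min(max(0, round(needed)), max_per_row)))
print(n)  # per-row orifice counts that track the linear flow ramp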
FLOW_lfom_error = np.zeros(self.n_rows)
for i in range(self.n_rows):
actual_flow = self.flow_actual(i, self.n_orifices_per_row)
FLOW_lfom_error[i] = (((actual_flow - self.flow_ramp[i]) / self.flow_ramp[i]).to(u.dimensionless)).magnitude
return FLOW_lfom_error | def error_per_row(self) | This function calculates the error of the design based on the
differences between the predicted flow rate
and the actual flow rate through the LFOM. | 4.967098 | 3.988615 | 1.245319 |
step_32nd = np.arange(0.03125, 0.25, 0.03125)
step_8th = np.arange(0.25, 1.0, 0.125)
step_4th = np.arange(1.0, 2.0, 0.25)
maximum = [2.0]
return np.concatenate((step_32nd,
step_8th,
step_4th,
maximum)) * u.inch | def get_drill_bits_d_imperial() | Return array of possible drill diameters in imperial. | 3.107889 | 2.894379 | 1.073767 |
return np.concatenate((np.arange(1.0, 10.0, 0.1),
np.arange(10.0, 18.0, 0.5),
np.arange(18.0, 36.0, 1.0),
np.arange(40.0, 55.0, 5.0))) * u.mm | def get_drill_bits_d_metric() | Return array of possible drill diameters in metric. | 2.570204 | 2.258921 | 1.137802 |
return self._C_sys * (self._Q_sys / self._Q_stock).to(u.dimensionless) | def C_stock(self) | Return the required concentration of material in the stock given a
reactor's desired system flow rate, system concentration, and stock
flow rate.
:return: Concentration of material in the stock
:rtype: float | 9.442092 | 8.128115 | 1.161658 |
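The stock concentration is a simple mass balance: the stock must be more concentrated by the same factor that its flow is smaller than the system flow (numbers assumed for illustration):

C_sys = 2.0    # mg/L, desired dose in the reactor
Q_sys = 60.0   # L/s, system flow
Q_stock = 0.1  # L/s, stock flow
print(C_sys * Q_sys / Q_stock)  # 1200.0 mg/L required in the stock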
return Stock.T_stock(self, V_stock, self._Q_stock).to(u.hr) | def T_stock(self, V_stock) | Return the amount of time at which the stock of material will be
depleted.
:param V_stock: Volume of the stock of material
:type V_stock: float
:return: Time at which the stock will be depleted
:rtype: float | 11.335405 | 12.702152 | 0.8924 |
return Stock.V_super_stock(self, V_stock, self.C_stock(), C_super_stock) | def V_super_stock(self, V_stock, C_super_stock) | Return the volume of super (more concentrated) stock that must be
diluted for the desired stock volume and required stock concentration.
:param V_stock: Volume of the stock of material
:type V_stock: float
:param C_super_stock: Concentration of the super stock
:type C_super_stock: float
:return: Volume of super stock to dilute
:rtype: float | 5.582066 | 6.82104 | 0.81836 |
return self._Q_sys * (self._C_sys / self._C_stock).to(u.dimensionless) | def Q_stock(self) | Return the required flow rate from the stock of material given
a reactor's desired system flow rate, system concentration, and stock
concentration.
:return: Flow rate from the stock of material
:rtype: float | 9.400718 | 7.630233 | 1.232036 |